repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/eplb_utils.py
tests/distributed/eplb_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import random import torch import torch.multiprocessing as mp from vllm.distributed.parallel_state import ( init_distributed_environment, ) from vllm.utils.system_utils import update_environment_variables mp.set_start_method("spawn", force=True) def distributed_run(fn, world_size, *args): number_of_processes = world_size processes: list[mp.Process] = [] for i in range(number_of_processes): env: dict[str, str] = {} env["RANK"] = str(i) env["LOCAL_RANK"] = str(i) env["WORLD_SIZE"] = str(number_of_processes) env["LOCAL_WORLD_SIZE"] = str(number_of_processes) env["MASTER_ADDR"] = "localhost" env["MASTER_PORT"] = "12345" p = mp.Process(target=fn, args=(env, world_size, *args)) processes.append(p) p.start() for p in processes: p.join() for p in processes: assert p.exitcode == 0 def set_env_vars_and_device(env: dict[str, str]) -> None: update_environment_variables(env) local_rank = os.environ["LOCAL_RANK"] device = torch.device(f"cuda:{local_rank}") torch.cuda.set_device(device) init_distributed_environment() # Ensure each worker process has the same random seed random.seed(42) torch.manual_seed(42)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_pp_cudagraph.py
tests/distributed/test_pp_cudagraph.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from typing_extensions import LiteralString from ..utils import compare_two_settings, create_new_process_for_each_test @pytest.mark.parametrize( "PP_SIZE, MODEL_NAME", [ (2, "JackFram/llama-160m"), ], ) @pytest.mark.parametrize( "ATTN_BACKEND", [ "FLASH_ATTN", ], ) @create_new_process_for_each_test() def test_pp_cudagraph( PP_SIZE: int, MODEL_NAME: str, ATTN_BACKEND: LiteralString, ): cudagraph_args = [ # use half precision for speed and memory savings in CI environment "--dtype", "float16", "--pipeline-parallel-size", str(PP_SIZE), "--distributed-executor-backend", "mp", f"--attention-backend={ATTN_BACKEND}", ] eager_args = cudagraph_args + ["--enforce-eager"] compare_two_settings(MODEL_NAME, eager_args, cudagraph_args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_sequence_parallel.py
tests/distributed/test_sequence_parallel.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ WARNING: This test runs in both single-node (4 GPUs) and multi-node (2 node with 2 GPUs each) modes. If the test only uses 2 GPUs, it is important to set the distributed backend to "mp" to avoid Ray scheduling all workers in a node other than the head node, which can cause the test to fail. """ import json import os from dataclasses import dataclass from typing import Literal, NamedTuple import pytest from vllm.config.compilation import CompilationMode from vllm.config.model import RunnerOption from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.utils.torch_utils import is_torch_equal_or_newer from ..models.registry import HF_EXAMPLE_MODELS from ..utils import compare_two_settings, create_new_process_for_each_test logger = init_logger("test_sequence_parallel") VLLM_MULTI_NODE = os.getenv("VLLM_MULTI_NODE", "0") == "1" class ParallelSetup(NamedTuple): tp_size: int pp_size: int fuse_norm_quant: bool fuse_act_quant: bool eager_mode: bool chunked_prefill: bool class SPTestOptions(NamedTuple): multi_node_only: bool load_format: str | None = None @dataclass class SPTestSettings: parallel_setups: list[ParallelSetup] distributed_backends: list[str] runner: RunnerOption test_options: SPTestOptions @staticmethod def detailed( *, tp_base: int = 2, pp_base: int = 1, multi_node_only: bool = False, runner: RunnerOption = "auto", load_format: str | None = None, ): parallel_setups = [] for eager_mode_val in [False, True]: for pp_multiplier in [1, 2]: for chunked_prefill_val in [False, True]: parallel_setups.append( ParallelSetup( tp_size=tp_base, pp_size=pp_multiplier * pp_base, fuse_norm_quant=False, fuse_act_quant=False, eager_mode=eager_mode_val, chunked_prefill=chunked_prefill_val, ) ) return SPTestSettings( parallel_setups=parallel_setups, distributed_backends=["mp", "ray"], runner=runner, test_options=SPTestOptions( 
multi_node_only=multi_node_only, load_format=load_format ), ) @staticmethod def fast( *, tp_base: int = 2, pp_base: int = 1, runner: RunnerOption = "auto", multi_node_only: bool = False, load_format: str | None = None, ): parallel_setups = [] for eager_mode_val in [False, True]: for pp_multiplier in [1, 2]: for chunked_prefill_val in [False, True]: parallel_setups.append( ParallelSetup( tp_size=tp_base, pp_size=pp_multiplier * pp_base, fuse_norm_quant=False, fuse_act_quant=False, eager_mode=eager_mode_val, chunked_prefill=chunked_prefill_val, ) ) return SPTestSettings( parallel_setups=parallel_setups, distributed_backends=["mp", "ray"], runner=runner, test_options=SPTestOptions( multi_node_only=multi_node_only, load_format=load_format ), ) @staticmethod def fp8_quant( *, tp_base: int = 2, pp_base: int = 1, runner: RunnerOption = "auto", multi_node_only: bool = False, load_format: str | None = None, ): parallel_setups = [] for fusion_val in [False, True]: parallel_setups.append( ParallelSetup( tp_size=tp_base, pp_size=pp_base, fuse_norm_quant=fusion_val, fuse_act_quant=fusion_val, eager_mode=True, chunked_prefill=False, ) ) return SPTestSettings( parallel_setups=parallel_setups, distributed_backends=["mp", "ray"], runner=runner, test_options=SPTestOptions( multi_node_only=multi_node_only, load_format=load_format ), ) def iter_params(self, model_id: str): opts = self.test_options for parallel_setup in self.parallel_setups: for backend in self.distributed_backends: yield ( model_id, parallel_setup, backend, self.runner, opts, ) def _compare_sp( model_id: str, parallel_setup: ParallelSetup, distributed_backend: str, runner: RunnerOption, test_options: SPTestOptions, num_gpus_available: int, use_inductor_graph_partition: bool, fuse_gemm_comms: bool, *, method: Literal["generate", "encode"], is_multimodal: bool, ): ( tp_size, pp_size, fuse_norm_quant, fuse_act_quant, eager_mode, chunked_prefill, ) = parallel_setup multi_node_only, load_format = test_options model_info = 
HF_EXAMPLE_MODELS.find_hf_info(model_id) model_info.check_transformers_version(on_fail="skip") trust_remote_code = model_info.trust_remote_code tokenizer_mode = model_info.tokenizer_mode hf_overrides = model_info.hf_overrides require_embed_inputs = model_info.require_embed_inputs if load_format == "dummy": # Avoid OOM text_overrides = { "num_hidden_layers": 4, "hidden_size": 512, "intermediate_size": 800, "num_attention_heads": 4, "num_key_value_heads": 1, } if is_multimodal: hf_overrides.update({"text_config": text_overrides}) else: hf_overrides.update(text_overrides) else: model_info.check_available_online(on_fail="skip") if num_gpus_available < tp_size * pp_size: pytest.skip(f"Need at least {tp_size} x {pp_size} GPUs") if VLLM_MULTI_NODE and distributed_backend == "mp": pytest.skip( "Skipping multi-node pipeline parallel test for " "multiprocessing distributed backend" ) if multi_node_only and not VLLM_MULTI_NODE: pytest.skip("Not in multi-node setting") common_args = [ # use half precision for speed and memory savings in CI environment "--dtype", "float16", "--max-model-len", "2048", "--max-num-seqs", "8", ] if chunked_prefill: common_args.append("--enable-chunked-prefill") if eager_mode: common_args.append("--enforce-eager") if runner != "auto": common_args.extend(["--runner", runner]) if trust_remote_code: common_args.append("--trust-remote-code") if tokenizer_mode: common_args.extend(["--tokenizer-mode", tokenizer_mode]) if load_format: common_args.extend(["--load-format", load_format]) if hf_overrides: common_args.extend(["--hf-overrides", json.dumps(hf_overrides)]) if require_embed_inputs: common_args.extend( [ "--skip-tokenizer-init", "--enable-prompt-embeds", "--enable-mm-embeds", ] ) compilation_config = { "mode": CompilationMode.VLLM_COMPILE, "compile_sizes": [4, 8], "pass_config": { "enable_sp": True, "fuse_gemm_comms": fuse_gemm_comms, "fuse_norm_quant": fuse_norm_quant, "fuse_act_quant": fuse_act_quant, "eliminate_noops": True, }, 
"use_inductor_graph_partition": use_inductor_graph_partition, } tp_sp_args = [ *common_args, "--tensor-parallel-size", str(tp_size), "--pipeline-parallel-size", str(pp_size), "--distributed-executor-backend", distributed_backend, "--compilation_config", json.dumps(compilation_config), ] tp_args = [ *common_args, "--tensor-parallel-size", str(tp_size), "--distributed-executor-backend", "mp", ] compare_two_settings(model_id, tp_sp_args, tp_args, method=method) SP_TEXT_GENERATION_MODELS = { # [Decoder-only] "hmellor/tiny-random-LlamaForCausalLM": SPTestSettings.fast(), "RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8": SPTestSettings.fp8_quant(), } SP_TEST_MODELS = [ # TODO support other models # [LANGUAGE GENERATION] "hmellor/tiny-random-LlamaForCausalLM", "RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8", ] @pytest.mark.parametrize( ( "model_id", "parallel_setup", "distributed_backend", "runner", "test_options", ), [ params for model_id, settings in SP_TEXT_GENERATION_MODELS.items() for params in settings.iter_params(model_id) if model_id in SP_TEST_MODELS ], ) @pytest.mark.parametrize("use_inductor_graph_partition", [True, False]) @pytest.mark.parametrize("fuse_gemm_comms", [False]) # TODO: enable async TP @create_new_process_for_each_test() def test_tp_sp_generation( model_id: str, parallel_setup: ParallelSetup, distributed_backend: str, runner: RunnerOption, test_options: SPTestOptions, num_gpus_available, use_inductor_graph_partition: bool, fuse_gemm_comms: bool, ): if use_inductor_graph_partition and not is_torch_equal_or_newer("2.9.0.dev"): pytest.skip("inductor graph partition is only available in PyTorch 2.9+") # Skip FP8 SP-only test on sm89 (compute capability 8.9) if ( "fp8" in model_id.lower() and current_platform.get_device_capability() < (9, 0) and (not fuse_gemm_comms) ): pytest.skip("FP8 reduction support begins with sm90 capable devices.") _compare_sp( model_id, parallel_setup, distributed_backend, runner, test_options, num_gpus_available, 
use_inductor_graph_partition, fuse_gemm_comms=fuse_gemm_comms, method="generate", is_multimodal=False, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_expert_parallel.py
tests/distributed/test_expert_parallel.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from dataclasses import dataclass from typing import Literal, NamedTuple import pytest from vllm.config.model import RunnerOption from vllm.logger import init_logger from ..utils import compare_two_settings, create_new_process_for_each_test logger = init_logger("test_expert_parallel") class ParallelSetup(NamedTuple): tp_size: int eager_mode: bool chunked_prefill: bool class EPTestOptions(NamedTuple): trust_remote_code: bool tokenizer_mode: str | None load_format: str | None = None hf_overrides: str | None = None @dataclass class EPTestSettings: parallel_setups: list[ParallelSetup] distributed_backends: list[str] runner: RunnerOption test_options: EPTestOptions @staticmethod def detailed( *, tp_base: int = 2, runner: RunnerOption = "auto", trust_remote_code: bool = False, tokenizer_mode: str | None = None, load_format: str | None = None, hf_overrides: str | None = None, ): return EPTestSettings( parallel_setups=[ ParallelSetup(tp_size=tp_base, eager_mode=False, chunked_prefill=False), ParallelSetup(tp_size=tp_base, eager_mode=False, chunked_prefill=True), ParallelSetup(tp_size=tp_base, eager_mode=True, chunked_prefill=False), ParallelSetup( tp_size=2 * tp_base, eager_mode=False, chunked_prefill=True ), ParallelSetup( tp_size=2 * tp_base, eager_mode=True, chunked_prefill=False ), ], distributed_backends=["mp", "ray"], runner=runner, test_options=EPTestOptions( trust_remote_code=trust_remote_code, tokenizer_mode=tokenizer_mode, load_format=load_format, hf_overrides=hf_overrides, ), ) @staticmethod def fast( *, tp_base: int = 2, runner: RunnerOption = "auto", trust_remote_code: bool = False, tokenizer_mode: str | None = None, load_format: str | None = None, hf_overrides: str | None = None, ): return EPTestSettings( parallel_setups=[ ParallelSetup(tp_size=tp_base, eager_mode=True, chunked_prefill=False), ], distributed_backends=["mp"], runner=runner, 
test_options=EPTestOptions( trust_remote_code=trust_remote_code, tokenizer_mode=tokenizer_mode, load_format=load_format, hf_overrides=hf_overrides, ), ) def iter_params(self, model_name: str): opts = self.test_options for parallel_setup in self.parallel_setups: for distributed_backend in self.distributed_backends: yield ( model_name, parallel_setup, distributed_backend, self.runner, opts, ) # NOTE: You can adjust tp_base locally to fit the model in GPU # The values displayed here are only a rough indicator of the size of the model TEST_MODELS = { "deepseek-ai/DeepSeek-V2-Lite-Chat": EPTestSettings.fast(trust_remote_code=True), "mistralai/Mixtral-8x7B-Instruct-v0.1": EPTestSettings.fast(tp_base=4), } def _compare_tp( model_name: str, parallel_setup: ParallelSetup, distributed_backend: str, runner: RunnerOption, test_options: EPTestOptions, num_gpus_available: int, *, method: Literal["generate"], ): ( tp_size, eager_mode, chunked_prefill, ) = parallel_setup ( trust_remote_code, tokenizer_mode, load_format, hf_overrides, ) = test_options if num_gpus_available < tp_size: pytest.skip(f"Need at least {tp_size} GPUs") common_args = [ # use half precision for speed and memory savings in CI environment "--dtype", "float16", "--max-model-len", "2048", "--max-num-seqs", "8", "--load-format", "auto", ] if chunked_prefill: common_args.append("--enable-chunked-prefill") if eager_mode: common_args.append("--enforce-eager") if runner != "auto": common_args.extend(["--runner", runner]) if trust_remote_code: common_args.append("--trust-remote-code") if tokenizer_mode: common_args.extend(["--tokenizer-mode", tokenizer_mode]) if load_format: common_args.extend(["--load-format", load_format]) if hf_overrides: common_args.extend(["--hf-overrides", hf_overrides]) ep_env = { "VLLM_TEST_ENABLE_EP": "1", } ep_args = [ *common_args, "--tensor-parallel-size", str(tp_size), "--distributed-executor-backend", distributed_backend, ] # compare without expert parallelism tp_env = { 
"VLLM_TEST_ENABLE_EP": "0", } tp_args = [ *common_args, "--tensor-parallel-size", str(tp_size), "--distributed-executor-backend", "mp", ] try: compare_two_settings( model_name, ep_args, tp_args, ep_env, tp_env, method=method, max_wait_seconds=360, ) except Exception: raise @pytest.mark.parametrize( ("model_name", "parallel_setup", "distributed_backend", "runner", "test_options"), [ params for model_name, settings in TEST_MODELS.items() for params in settings.iter_params(model_name) ], ) @create_new_process_for_each_test() def test_ep( model_name: str, parallel_setup: ParallelSetup, distributed_backend: str, runner: RunnerOption, test_options: EPTestOptions, num_gpus_available, ): _compare_tp( model_name, parallel_setup, distributed_backend, runner, test_options, num_gpus_available, method="generate", )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_expert_placement.py
tests/distributed/test_expert_placement.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from vllm.model_executor.layers.fused_moe.layer import determine_expert_map def verify_round_robin_pattern(expert_map, ep_rank, ep_size, global_num_experts): """Verify that the expert map follows the round_robin pattern.""" # Calculate expected local experts (supporting non-divisible cases) base_experts = global_num_experts // ep_size remainder = global_num_experts % ep_size local_num_experts = base_experts + 1 if ep_rank < remainder else base_experts # Expected expert IDs for this rank in round_robin pattern # For non-divisible cases, ranks with extra experts start earlier expected_expert_ids = [] for expert_idx in range(local_num_experts): global_expert_id = ep_rank + expert_idx * ep_size expected_expert_ids.append(global_expert_id) # Check that only expected experts are mapped to this rank for global_expert_id in range(global_num_experts): if global_expert_id in expected_expert_ids: local_expert_id = expert_map[global_expert_id] expected_local_id = expected_expert_ids.index(global_expert_id) assert local_expert_id == expected_local_id, ( f"Global expert {global_expert_id} should map to local expert " f"{expected_local_id}, got {local_expert_id}" ) else: assert expert_map[global_expert_id] == -1, ( f"Global expert {global_expert_id} should not be mapped to this rank" ) # Verify that all local expert IDs are consecutive starting from 0 local_expert_ids = [expert_map[global_id] for global_id in expected_expert_ids] expected_local_ids = list(range(local_num_experts)) assert local_expert_ids == expected_local_ids, ( f"Expected local expert IDs {expected_local_ids}, got {local_expert_ids}" ) @pytest.mark.parametrize("expert_placement_strategy", ["round_robin"]) @pytest.mark.parametrize("world_size", [2, 4]) def test_expert_placement_various_sizes(expert_placement_strategy, world_size): """Test round_robin expert placement with various expert 
counts.""" # Test with different global_num_experts values # Include both divisible and non-divisible cases if world_size == 2: test_cases = [ (4, 2), # 4 experts (divisible) (8, 2), # 8 experts (divisible) (9, 2), # 9 experts (non-divisible) (16, 2), # 16 experts (divisible) (17, 2), # 17 experts (non-divisible) ] elif world_size == 4: test_cases = [ (8, 4), # 8 experts (divisible) (16, 4), # 16 experts (divisible) (18, 4), # 18 experts (non-divisible) (32, 4), # 32 experts (divisible) (33, 4), # 33 experts (non-divisible) ] else: test_cases = [] for test_global_experts, test_ep_size in test_cases: # Ensure ep_size matches world_size assert test_ep_size == world_size, ( f"ep_size {test_ep_size} must equal world_size {world_size}" ) # Test each rank for ep_rank in range(world_size): # Calculate expected local experts base_experts = test_global_experts // test_ep_size remainder = test_global_experts % test_ep_size if ep_rank < remainder: expected_test_local = base_experts + 1 else: expected_test_local = base_experts test_local_experts, test_expert_map, _ = determine_expert_map( ep_size=test_ep_size, ep_rank=ep_rank, global_num_experts=test_global_experts, expert_placement_strategy=expert_placement_strategy, ) assert test_local_experts == expected_test_local, ( f"For {test_global_experts} experts on {test_ep_size} ranks, " f"rank {ep_rank}: expected {expected_test_local} local" f"experts, got {test_local_experts}" ) if test_expert_map is not None: assert test_expert_map.shape == (test_global_experts,), ( f"Expected expert map shape ({test_global_experts},), " f"got {test_expert_map.shape}" ) # Verify round_robin pattern for this test case verify_round_robin_pattern( test_expert_map, ep_rank, test_ep_size, test_global_experts ) @pytest.mark.parametrize("expert_placement_strategy", ["round_robin"]) @pytest.mark.parametrize("world_size", [2, 4]) def test_expert_placement_edge_cases(expert_placement_strategy, world_size): """Test edge cases for round_robin expert 
placement.""" # Test case 1: ep_size = 1 (should return None for expert_map) local_num_experts, expert_map, _ = determine_expert_map( ep_size=1, ep_rank=0, global_num_experts=8, expert_placement_strategy=expert_placement_strategy, ) assert local_num_experts == 8, "For ep_size=1, should get all experts" assert expert_map is None, "For ep_size=1, expert_map should be None" # Test case 2: ep_size = 0 (should raise assertion) with pytest.raises(AssertionError): determine_expert_map( ep_size=0, ep_rank=0, global_num_experts=8, expert_placement_strategy=expert_placement_strategy, ) def test_determine_expert_map_comprehensive(): """Test of determine_expert_map function with various configurations.""" # Test cases: (ep_size, ep_rank, global_num_experts, # expert_placement_strategy, expected_local, expected_map_pattern) test_cases = [ # Round robin placement tests ( 2, 0, 8, "round_robin", 4, [0, -1, 1, -1, 2, -1, 3, -1], ), # rank 0 gets even experts ( 2, 1, 8, "round_robin", 4, [-1, 0, -1, 1, -1, 2, -1, 3], ), # rank 1 gets odd experts ( 2, 0, 9, "round_robin", 5, [0, -1, 1, -1, 2, -1, 3, -1, 4], ), # rank 0 gets 5 experts (even + last) ( 2, 1, 9, "round_robin", 4, [-1, 0, -1, 1, -1, 2, -1, 3, -1], ), # rank 1 gets 4 experts (odd) # 4-rank tests ( 4, 0, 8, "round_robin", 2, [0, -1, -1, -1, 1, -1, -1, -1], ), # rank 0 gets experts 0, 4 ( 4, 1, 8, "round_robin", 2, [-1, 0, -1, -1, -1, 1, -1, -1], ), # rank 1 gets experts 1, 5 ( 4, 2, 8, "round_robin", 2, [-1, -1, 0, -1, -1, -1, 1, -1], ), # rank 2 gets experts 2, 6 ( 4, 3, 8, "round_robin", 2, [-1, -1, -1, 0, -1, -1, -1, 1], ), # rank 3 gets experts 3, 7 ] for ( ep_size, ep_rank, global_num_experts, expert_placement_strategy, expected_local, expected_map_pattern, ) in test_cases: local_num_experts, expert_map, _ = determine_expert_map( ep_size=ep_size, ep_rank=ep_rank, global_num_experts=global_num_experts, expert_placement_strategy=expert_placement_strategy, ) assert local_num_experts == expected_local, ( 
f"ep_size={ep_size}, ep_rank={ep_rank}, " f"global_num_experts={global_num_experts}, " f"expert_placement_strategy={expert_placement_strategy}: " f"expected {expected_local} local experts, got {local_num_experts}" ) if expected_map_pattern is None: assert expert_map is None, "Expected expert_map to be None" else: assert expert_map is not None, "Expected expert_map to not be None" actual_map = expert_map.tolist() assert actual_map == expected_map_pattern, ( f"ep_size={ep_size}, ep_rank={ep_rank}, " f"global_num_experts={global_num_experts}, " f"expert_placement_strategy={expert_placement_strategy}: " f"expected map {expected_map_pattern}, got {actual_map}" )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_comm_ops.py
tests/distributed/test_comm_ops.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Test the communication operators. Run `pytest tests/distributed/test_comm_ops.py`. """ from collections.abc import Callable from typing import Any import pytest import ray import torch from vllm.distributed import ( broadcast_tensor_dict, get_pp_group, tensor_model_parallel_all_gather, tensor_model_parallel_all_reduce, tensor_model_parallel_reduce_scatter, ) from ..utils import ( init_test_distributed_environment, multi_gpu_test, multi_process_parallel, ) @ray.remote(num_gpus=1, max_calls=1) def all_reduce_test_worker( monkeypatch: pytest.MonkeyPatch, tp_size: int, pp_size: int, rank: int, distributed_init_port: str, ): # it is important to delete the CUDA_VISIBLE_DEVICES environment variable # so that each worker can see all the GPUs # they will be able to set the device to the correct GPU monkeypatch.delenv("CUDA_VISIBLE_DEVICES", raising=False) device = torch.device(f"cuda:{rank}") torch.cuda.set_device(device) init_test_distributed_environment(tp_size, pp_size, rank, distributed_init_port) num_elements = 8 all_tensors = [ torch.arange(num_elements, dtype=torch.float32, device="cuda") * (r + 1) for r in range(tp_size) ] expected = torch.sum(torch.stack(all_tensors, dim=0), dim=0) t = all_tensors[rank % tp_size] t = tensor_model_parallel_all_reduce(t) torch.testing.assert_close(t, expected) @ray.remote(num_gpus=1, max_calls=1) def reduce_scatter_test_worker( monkeypatch: pytest.MonkeyPatch, tp_size: int, pp_size: int, rank: int, distributed_init_port: str, ): # it is important to delete the CUDA_VISIBLE_DEVICES environment variable # so that each worker can see all the GPUs # they will be able to set the device to the correct GPU monkeypatch.delenv("CUDA_VISIBLE_DEVICES", raising=False) device = torch.device(f"cuda:{rank}") torch.cuda.set_device(device) init_test_distributed_environment(tp_size, pp_size, rank, distributed_init_port) num_elements = 8 
all_tensors = [ torch.arange(num_elements, dtype=torch.float32, device="cuda") * (r + 1) for r in range(tp_size) ] index = rank % tp_size partition_size = num_elements // tp_size all_reduce = torch.sum(torch.stack(all_tensors, dim=0), dim=0) expected = all_reduce[index * partition_size : (index + 1) * partition_size] t = all_tensors[index] t = tensor_model_parallel_reduce_scatter(t, 0) torch.testing.assert_close(t, expected) @ray.remote(num_gpus=1, max_calls=1) def all_gather_test_worker( monkeypatch: pytest.MonkeyPatch, tp_size: int, pp_size: int, rank: int, distributed_init_port: str, ): # it is important to delete the CUDA_VISIBLE_DEVICES environment variable # so that each worker can see all the GPUs # they will be able to set the device to the correct GPU monkeypatch.delenv("CUDA_VISIBLE_DEVICES", raising=False) device = torch.device(f"cuda:{rank}") torch.cuda.set_device(device) init_test_distributed_environment(tp_size, pp_size, rank, distributed_init_port) num_dimensions = 3 tensor_size = list(range(2, num_dimensions + 2)) total_size = 1 for s in tensor_size: total_size *= s for all_gather_dimension in range(num_dimensions): all_tensors = [ torch.arange(total_size, dtype=torch.float32, device="cuda").reshape( tensor_size ) * (r + 1) for r in range(tp_size) ] expected = torch.cat(all_tensors, dim=all_gather_dimension) t = all_tensors[rank % tp_size] t = tensor_model_parallel_all_gather(t, all_gather_dimension) torch.testing.assert_close(t, expected) @ray.remote(num_gpus=1, max_calls=1) def broadcast_tensor_dict_test_worker( monkeypatch: pytest.MonkeyPatch, tp_size: int, pp_size: int, rank: int, distributed_init_port: str, ): # it is important to delete the CUDA_VISIBLE_DEVICES environment variable # so that each worker can see all the GPUs # they will be able to set the device to the correct GPU monkeypatch.delenv("CUDA_VISIBLE_DEVICES", raising=False) device = torch.device(f"cuda:{rank}") torch.cuda.set_device(device) 
init_test_distributed_environment(tp_size, pp_size, rank, distributed_init_port) test_dict = { # device tensor "a": torch.arange(8, dtype=torch.float32, device="cuda"), # CPU tensor "b": torch.arange(16, dtype=torch.int8, device="cpu"), "c": "test", "d": [1, 2, 3], "e": {"a": 1, "b": 2}, # empty tensor "f": torch.tensor([], dtype=torch.float32, device="cuda"), } if (rank % tp_size) == 0: broadcast_tensor_dict(test_dict, src=0) else: recv_dict = broadcast_tensor_dict(src=0) assert len(recv_dict) == len(test_dict) torch.testing.assert_close(recv_dict["a"], test_dict["a"]) torch.testing.assert_close(recv_dict["b"], test_dict["b"]) assert recv_dict["c"] == test_dict["c"] assert recv_dict["d"] == test_dict["d"] assert recv_dict["e"] == test_dict["e"] torch.testing.assert_close(recv_dict["f"], test_dict["f"]) @ray.remote(num_gpus=1, max_calls=1) def send_recv_tensor_dict_test_worker( monkeypatch: pytest.MonkeyPatch, tp_size: int, pp_size: int, rank: int, distributed_init_port: str, ): monkeypatch.delenv("CUDA_VISIBLE_DEVICES", raising=False) device = torch.device(f"cuda:{rank}") torch.cuda.set_device(device) init_test_distributed_environment(tp_size, pp_size, rank, distributed_init_port) test_dict = { # device tensor "a": torch.arange(8, dtype=torch.float32, device="cuda"), # CPU tensor "b": torch.arange(16, dtype=torch.int8, device="cpu"), "c": "test", "d": [1, 2, 3], "e": {"a": 1, "b": 2}, # empty tensor "f": torch.tensor([], dtype=torch.float32, device="cuda"), } if not get_pp_group().is_first_rank: recv_dict = get_pp_group().recv_tensor_dict() if not get_pp_group().is_last_rank: get_pp_group().send_tensor_dict(test_dict) if not get_pp_group().is_first_rank: assert len(recv_dict) == len(test_dict) torch.testing.assert_close(recv_dict["a"], test_dict["a"]) torch.testing.assert_close(recv_dict["b"], test_dict["b"]) assert recv_dict["c"] == test_dict["c"] assert recv_dict["d"] == test_dict["d"] assert recv_dict["e"] == test_dict["e"] 
torch.testing.assert_close(recv_dict["f"], test_dict["f"]) @ray.remote(num_gpus=1, max_calls=1) def send_recv_test_worker( monkeypatch: pytest.MonkeyPatch, tp_size: int, pp_size: int, rank: int, distributed_init_port: str, ): monkeypatch.delenv("CUDA_VISIBLE_DEVICES", raising=False) device = torch.device(f"cuda:{rank}") torch.cuda.set_device(device) init_test_distributed_environment(tp_size, pp_size, rank, distributed_init_port) size = 64 test_tensor = torch.arange(64, dtype=torch.float32, device="cuda") if not get_pp_group().is_first_rank: recv_tensor = get_pp_group().recv(size, dtype=torch.float32) if not get_pp_group().is_last_rank: get_pp_group().send(test_tensor) if not get_pp_group().is_first_rank: torch.testing.assert_close(test_tensor, recv_tensor) @multi_gpu_test(num_gpus=2) @pytest.mark.parametrize("tp_size", [2]) @pytest.mark.parametrize( "test_target", [all_reduce_test_worker, all_gather_test_worker, broadcast_tensor_dict_test_worker], ) def test_multi_process_tensor_parallel( monkeypatch: pytest.MonkeyPatch, tp_size: int, test_target: Callable[..., Any], ): multi_process_parallel(monkeypatch, tp_size, 1, test_target) @multi_gpu_test(num_gpus=2) @pytest.mark.parametrize("pp_size", [2]) @pytest.mark.parametrize( "test_target", [send_recv_test_worker, send_recv_tensor_dict_test_worker] ) def test_multi_process_pipeline_parallel( monkeypatch: pytest.MonkeyPatch, pp_size: int, test_target: Callable[..., Any], ): multi_process_parallel(monkeypatch, 1, pp_size, test_target) @multi_gpu_test(num_gpus=4) @pytest.mark.parametrize("tp_size", [2]) @pytest.mark.parametrize("pp_size", [2]) @pytest.mark.parametrize( "test_target", [ send_recv_test_worker, send_recv_tensor_dict_test_worker, all_reduce_test_worker, all_gather_test_worker, broadcast_tensor_dict_test_worker, ], ) def test_multi_process_tensor_parallel_pipeline_parallel( tp_size: int, pp_size: int, test_target: Callable[..., Any], monkeypatch: pytest.MonkeyPatch, ): multi_process_parallel(monkeypatch, 
tp_size, pp_size, test_target)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_distributed_oot.py
tests/distributed/test_distributed_oot.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from ..entrypoints.openai.test_oot_registration import run_and_test_dummy_opt_api_server def test_distributed_oot(dummy_opt_path: str): run_and_test_dummy_opt_api_server(dummy_opt_path, tp=2)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_torchrun_example.py
tests/distributed/test_torchrun_example.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # unit test for `examples/offline_inference/torchrun_example.py` import os import random import torch.distributed as dist from vllm import LLM, SamplingParams from vllm.distributed.parallel_state import get_world_group dist.init_process_group(backend="gloo") # Create prompts prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] sampling_params = SamplingParams(temperature=0.8, top_p=0.95) # set different `gpu_memory_utilization` and `swap_space` for different ranks, # to test if all ranks agree on the same kv cache configuration. llm = LLM( model="facebook/opt-125m", tensor_parallel_size=2, pipeline_parallel_size=int(os.getenv("PP_SIZE", 1)), distributed_executor_backend="external_launcher", gpu_memory_utilization=random.uniform(0.7, 0.9), swap_space=random.randint(1, 4), seed=0, ) outputs = llm.generate(prompts, sampling_params) cpu_group = get_world_group().cpu_group torch_rank = dist.get_rank(group=cpu_group) def test_consistent_across_ranks(obj): if torch_rank == 0: dist.broadcast_object_list([obj], src=0, group=cpu_group) else: container = [None] dist.broadcast_object_list(container, src=0, group=cpu_group) assert container[0] == obj test_consistent_across_ranks(llm.llm_engine.vllm_config.cache_config.num_cpu_blocks) test_consistent_across_ranks(llm.llm_engine.vllm_config.cache_config.num_gpu_blocks) # make sure we can access the model parameters from the calling process # of the `LLM` instance. 
params = list( llm.llm_engine.model_executor.driver_worker.worker.model_runner.model.parameters() ) test_consistent_across_ranks(len(params)) # all ranks should have the same outputs for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text test_consistent_across_ranks(prompt) test_consistent_across_ranks(generated_text) print(f"Rank {torch_rank}, Prompt: {prompt!r}, Generated text: {generated_text!r}")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/distributed/test_events.py
tests/distributed/test_events.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import threading import time import msgspec import pytest from vllm.distributed.kv_events import ( EventBatch, EventPublisherFactory, NullEventPublisher, ) DP_RANK = 0 class EventSample( msgspec.Struct, tag=True, # type: ignore array_like=True, # type: ignore ): """Test event for publisher testing""" id: int value: str class SampleBatch(EventBatch): """Test event batch for publisher testing""" events: list[EventSample] def create_test_events(count: int) -> SampleBatch: """Create a batch of test events""" events = [EventSample(id=i, value=f"test-{i}") for i in range(count)] return SampleBatch(ts=time.time(), events=events) def test_basic_publishing(publisher, subscriber): """Test basic event publishing works""" test_batch = create_test_events(5) publisher.publish(test_batch) result = subscriber.receive_one(timeout=1000) assert result is not None, "No message received" seq, received = result assert seq == 0, "Sequence number mismatch" assert received.ts == pytest.approx(test_batch.ts, abs=0.1), "Timestamp mismatch" assert len(received.events) == len(test_batch.events), "Number of events mismatch" for i, event in enumerate(received.events): assert event.id == i, "Event id mismatch" assert event.value == f"test-{i}", "Event value mismatch" def test_multiple_events(publisher, subscriber): """Test publishing and receiving multiple event batches""" for _ in range(10): batch = create_test_events(2) publisher.publish(batch) received = [] for _ in range(10): data = subscriber.receive_one(timeout=100) if data: received.append(data) assert len(received) == 10, "Number of messages mismatch" seqs = [seq for seq, _ in received] assert seqs == list(range(10)), "Sequence numbers mismatch" def test_replay_mechanism(publisher, subscriber): """Test the replay mechanism works correctly""" for _ in range(19): batch = create_test_events(1) publisher.publish(batch) time.sleep(0.5) # 
Need publisher to process above requests subscriber.request_replay(10) batch = create_test_events(1) publisher.publish(batch) # 20th message replayed = subscriber.receive_replay() assert len(replayed) > 0, "No replayed messages received" seqs = [seq for seq, _ in replayed] assert all(seq >= 10 for seq in seqs), "Replayed messages not in order" assert seqs == list(range(min(seqs), max(seqs) + 1)), ( "Replayed messages not consecutive" ) def test_buffer_limit(publisher, subscriber, publisher_config): """Test buffer limit behavior""" buffer_size = publisher_config.buffer_steps # Publish more events than the buffer can hold for i in range(buffer_size + 10): batch = create_test_events(1) publisher.publish(batch) time.sleep(0.5) # Need publisher to process above requests subscriber.request_replay(0) batch = create_test_events(1) publisher.publish(batch) replayed = subscriber.receive_replay() assert len(replayed) <= buffer_size, "Can't replay more than buffer size" oldest_seq = min(seq for seq, _ in replayed) assert oldest_seq >= 10, "The oldest sequence should be at least 10" def test_topic_filtering(publisher_config): """ Test that a subscriber only receives messages matching its topic filter """ publisher_config.replay_endpoint = None publisher_config.topic = "foo" pub = EventPublisherFactory.create(publisher_config, DP_RANK) from .conftest import MockSubscriber sub_foo = MockSubscriber(publisher_config.endpoint, None, "foo") sub_bar = MockSubscriber(publisher_config.endpoint, None, "bar") try: time.sleep(0.1) for _ in range(3): pub.publish(create_test_events(1)) foo_received = [sub_foo.receive_one(timeout=200) for _ in range(3)] assert all(msg is not None for msg in foo_received), ( "Subscriber with matching topic should receive messages" ) bar_received = [sub_bar.receive_one(timeout=200) for _ in range(3)] assert all(msg is None for msg in bar_received), ( "Subscriber with non-matching topic should receive no messages" ) finally: pub.shutdown() sub_foo.close() 
sub_bar.close() def test_high_volume(publisher, subscriber): """Test publishing and receiving a high volume of events""" num_batches = 10_000 events_per_batch = 100 # Publish events in a separate thread to not block def publish_events(): for i in range(num_batches): batch = create_test_events(events_per_batch) publisher.publish(batch) # Small delay to avoid overwhelming if i % 100 == 0: time.sleep(0.01) received: list[tuple[int, SampleBatch]] = [] publisher_thread = threading.Thread(target=publish_events) publisher_thread.start() start_time = time.time() while len(received) < num_batches: if time.time() - start_time > 10: # Timeout after 10 seconds break result = subscriber.receive_one(timeout=100) if result: received.append(result) publisher_thread.join() assert len(received) >= num_batches * 0.9, "We should have received most messages" seqs = [seq for seq, _ in received] assert sorted(seqs) == seqs, "Sequence numbers should be in order" def test_null_publisher(): """Test that NullEventPublisher can be used without errors""" publisher = NullEventPublisher(DP_RANK) # This should not raise any errors batch = create_test_events(5) publisher.publish(batch) publisher.shutdown() def test_data_parallel_rank_tagging(publisher_config): """Test that events are properly tagged with their data parallel rank""" publisher_config.topic = "foo" pub_0 = EventPublisherFactory.create(publisher_config, DP_RANK) pub_1 = EventPublisherFactory.create(publisher_config, DP_RANK + 1) # Hardcode the expected endpoints based on port offsetting behavior # Both ranks get offsets according to _offset_endpoint_port function base_endpoint = publisher_config.endpoint if "tcp://" in base_endpoint: # For TCP endpoints: tcp://localhost:5557 -> tcp://localhost:5557, tcp://localhost:5558 expected_endpoint_0 = base_endpoint # rank 0 gets port + 0 = same port expected_endpoint_1 = base_endpoint.replace( ":5557", ":5558" ) # rank 1 gets port + 1 else: # For inproc endpoints: inproc://test -> 
inproc://test_dp0, inproc://test_dp1 expected_endpoint_0 = base_endpoint # rank 0 gets base expected_endpoint_1 = base_endpoint + "_dp1" # rank 1 gets _dp1 from .conftest import MockSubscriber sub_0 = MockSubscriber(expected_endpoint_0, None, publisher_config.topic) sub_1 = MockSubscriber(expected_endpoint_1, None, publisher_config.topic) try: time.sleep(0.1) # Let publishers start up # Publish events from different ranks batch_0 = create_test_events(2) batch_1 = create_test_events(3) pub_0.publish(batch_0) pub_1.publish(batch_1) # Receive events from rank 0 result_0 = sub_0.receive_one(timeout=200) assert result_0 is not None, "No message received from rank 0" seq_0, received_0 = result_0 # Receive events from rank 1 result_1 = sub_1.receive_one(timeout=200) assert result_1 is not None, "No message received from rank 1" seq_1, received_1 = result_1 # Verify DP rank tagging assert received_0.data_parallel_rank == 0, ( f"Expected DP rank 0, got {received_0.data_parallel_rank}" ) assert received_1.data_parallel_rank == 1, ( f"Expected DP rank 1, got {received_1.data_parallel_rank}" ) # Verify event content is correct assert len(received_0.events) == 2, "Wrong number of events from rank 0" assert len(received_1.events) == 3, "Wrong number of events from rank 1" finally: pub_0.shutdown() pub_1.shutdown() sub_0.close() sub_1.close() def test_event_publisher_factory(): """Test event publisher factory creation behavior under different configurations""" from vllm.config.kv_events import KVEventsConfig from vllm.distributed.kv_events import ZmqEventPublisher # test config is None publisher = EventPublisherFactory.create(None, DP_RANK) assert isinstance(publisher, NullEventPublisher) publisher.shutdown() # test disable kv cache events config = KVEventsConfig( enable_kv_cache_events=False, publisher="zmq", # Even if zmq is specified, should return NullEventPublisher endpoint="tcp://localhost:5557", ) publisher = EventPublisherFactory.create(config, DP_RANK) assert 
isinstance(publisher, NullEventPublisher) publisher.shutdown() # test zmq publisher config = KVEventsConfig( enable_kv_cache_events=True, publisher="zmq", endpoint="inproc://test-factory-true", ) publisher = EventPublisherFactory.create(config, DP_RANK) assert isinstance(publisher, ZmqEventPublisher) publisher.shutdown() # test unknown publisher with pytest.raises(ValueError, match="Input should be"): KVEventsConfig( enable_kv_cache_events=True, publisher="unknown_publisher", endpoint="tcp://localhost:5557", ) # test publisher not specified config = KVEventsConfig( enable_kv_cache_events=True, # publisher not specified, should default to "zmq" endpoint="tcp://localhost:5557", ) publisher = EventPublisherFactory.create(config, DP_RANK) assert isinstance(publisher, ZmqEventPublisher) publisher.shutdown()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/weight_loading/test_weight_loading.py
tests/weight_loading/test_weight_loading.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import pytest import torch from vllm.platforms import current_platform MAX_MODEL_LEN = 1024 MODEL_NAME = os.environ.get( "MODEL_NAME", "robertgshaw2/zephyr-7b-beta-channelwise-gptq" ) REVISION = os.environ.get("REVISION", "main") QUANTIZATION = os.environ.get("QUANTIZATION", "gptq_marlin") MIN_CAPABILITY = os.environ.get("MIN_CAPABILITY", "80") @pytest.mark.skipif( MODEL_NAME == "casperhansen/deepseek-coder-v2-instruct-awq", reason="OOM in the CI" ) @pytest.mark.skipif( not current_platform.has_device_capability(int(MIN_CAPABILITY)), reason="Current system does not have minimum capability.", ) def test_weight_loading(vllm_runner): """ Test parameter weight loading with tp>1. """ # MoE models need fp16. NEEDS_FP16 = ( QUANTIZATION == "gptq" or MODEL_NAME == "nm-testing/test-w4a16-mixtral-actorder-group" ) with vllm_runner( model_name=MODEL_NAME, revision=REVISION, dtype=torch.half if NEEDS_FP16 else "auto", quantization=None if QUANTIZATION == "None" else QUANTIZATION, max_model_len=MAX_MODEL_LEN, tensor_parallel_size=2, ) as model: output = model.generate_greedy("Hello world!", max_tokens=20) print(output) assert output
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/rocm/aiter/test_grouped_quant.py
tests/rocm/aiter/test_grouped_quant.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # This is a test for the AITER group_fp8_quant op. # It tests if the AITER op is # 1. correctly defined the relationship between # implementation and fake function # 2. can be used with torch.compile # 3. can be used with CUDA graphs # This file will be skipped if AITER is not installed # and the platform is not ROCm. import importlib.util import pytest import torch # this import statement is needed to ensure the ops are registered from vllm._aiter_ops import rocm_aiter_ops from vllm.platforms import current_platform # Check if aiter package is installed aiter_available = importlib.util.find_spec("aiter") is not None pytestmark = pytest.mark.skipif( not (current_platform.is_rocm() and aiter_available), reason="AITER ops are only available on ROCm with aiter package installed", ) def test_rocm_aiter_group_fp8_quant_fake_implementation(): """Test that the fake implementation is correctly defined for torch.ops.vllm.rocm_aiter_group_fp8_quant.""" # Create test tensors M = 128 N = 4096 group_size = 128 input_tensor = torch.randn((M, N), dtype=torch.bfloat16, device="cuda") # Verify the op's fake implementation using torch.library.opcheck # This checks that the fake function returns tensors with correct shapes and dtypes torch.library.opcheck( torch.ops.vllm.rocm_aiter_group_fp8_quant, (input_tensor, group_size), test_utils=("test_faketensor",), ) def test_rocm_aiter_group_fp8_quant_torch_compile_with_cudagraph(): """Test that rocm_aiter_ops.group_fp8_quant with group size 128 can be used with torch.compile in cudagraph mode.""" # Create test tensors M = 128 N = 4096 group_size = 128 input_tensor = torch.randn((M, N), dtype=torch.bfloat16, device="cuda") # Define a function that uses the op def group_fp8_quant_fn(x): return rocm_aiter_ops.group_fp8_quant(x, group_size) # Compile with cudagraph mode compiled_fn = torch.compile( group_fp8_quant_fn, fullgraph=True, 
backend="inductor", mode="reduce-overhead", dynamic=False, ) # Run eager mode x_fp8_eager, scales_eager = group_fp8_quant_fn(input_tensor) # Run compiled version (first run will trigger compilation) x_fp8_compiled, scales_compiled = compiled_fn(input_tensor) # Verify shapes match assert x_fp8_compiled.shape == x_fp8_eager.shape assert scales_compiled.shape == scales_eager.shape # Verify expected shapes assert x_fp8_compiled.shape == (M, N) expected_scale_cols = (N + group_size - 1) // group_size assert scales_compiled.shape == (M, expected_scale_cols) # Verify results match assert torch.allclose( x_fp8_compiled.to(torch.float32), x_fp8_eager.to(torch.float32), rtol=1e-2, atol=1e-2, ) assert torch.allclose(scales_compiled, scales_eager, rtol=1e-3, atol=1e-3) # Test with different input (reusing compiled graph) input_tensor_2 = torch.randn((M, N), dtype=torch.bfloat16, device="cuda") x_fp8_eager_2, scales_eager_2 = group_fp8_quant_fn(input_tensor_2) x_fp8_compiled_2, scales_compiled_2 = compiled_fn(input_tensor_2) # Verify second run also produces correct results assert torch.allclose( x_fp8_compiled_2.to(torch.float32), x_fp8_eager_2.to(torch.float32), rtol=1e-2, atol=1e-2, ) assert torch.allclose(scales_compiled_2, scales_eager_2, rtol=1e-3, atol=1e-3) def test_rocm_aiter_group_fp8_quant_different_shapes(): """Test rocm_aiter_ops.group_fp8_quant with different input shapes.""" group_size = 128 test_shapes = [ (64, 2048), (256, 8192), (32, 1024), (512, 4096), ] for M, N in test_shapes: input_tensor = torch.randn((M, N), dtype=torch.bfloat16, device="cuda") x_fp8, scales = rocm_aiter_ops.group_fp8_quant(input_tensor, group_size) # Verify shapes assert x_fp8.shape == (M, N) expected_scale_cols = (N + group_size - 1) // group_size assert scales.shape == (M, expected_scale_cols) # Verify dtypes from aiter import dtypes assert x_fp8.dtype == dtypes.fp8 assert scales.dtype == torch.float32
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_minimax_m2_append_reasoning_parser.py
tests/reasoning/test_minimax_m2_append_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from transformers import AutoTokenizer from tests.reasoning.utils import run_reasoning_extraction from vllm.reasoning import ReasoningParser, ReasoningParserManager parser_name = "minimax_m2_append_think" end_token = "</think>" # MiniMax M2 model path REASONING_MODEL_NAME = "MiniMaxAI/MiniMax-M2" @pytest.fixture(scope="module") def minimax_m2_tokenizer(): return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME) # ============================================================================= # MiniMaxM2AppendThinkReasoningParser behavior: # - Prepends <think> to the beginning of the output # - Does NOT separate reasoning and content # - Returns everything as content (with <think> prepended) # - reasoning is always None # # This parser is used when you want to keep the raw output with <think> added # ============================================================================= # Case: simple output with end token SIMPLE_OUTPUT = { "output": "This is reasoning</think>This is response", "reasoning": None, "content": "<think>This is reasoning</think>This is response", "is_reasoning_end": True, } # Case: output without end token (reasoning in progress) NO_END_TOKEN = { "output": "This is reasoning in progress", "reasoning": None, "content": "<think>This is reasoning in progress", "is_reasoning_end": False, } # Case: only end token ONLY_END_TOKEN = { "output": "</think>This is response", "reasoning": None, "content": "<think></think>This is response", "is_reasoning_end": True, } # Case: multiple lines MULTIPLE_LINES = { "output": "Line 1\nLine 2</think>Response 1\nResponse 2", "reasoning": None, "content": "<think>Line 1\nLine 2</think>Response 1\nResponse 2", "is_reasoning_end": True, } # Case: empty output (non-streaming prepends <think>) EMPTY = { "output": "", "reasoning": None, "content": "<think>", "is_reasoning_end": False, } # Case: empty 
output streaming (no tokens = no output) EMPTY_STREAMING = { "output": "", "reasoning": None, "content": None, "is_reasoning_end": False, } # Case: special characters SPECIAL_CHARS = { "output": "Let me think... 1+1=2</think>Yes!", "reasoning": None, "content": "<think>Let me think... 1+1=2</think>Yes!", "is_reasoning_end": True, } # Case: code in output CODE_OUTPUT = { "output": "```python\nprint('hi')\n```</think>Here's the code.", "reasoning": None, "content": "<think>```python\nprint('hi')\n```</think>Here's the code.", "is_reasoning_end": True, } TEST_CASES = [ pytest.param( False, SIMPLE_OUTPUT, id="simple_output", ), pytest.param( True, SIMPLE_OUTPUT, id="simple_output_streaming", ), pytest.param( False, NO_END_TOKEN, id="no_end_token", ), pytest.param( True, NO_END_TOKEN, id="no_end_token_streaming", ), pytest.param( False, ONLY_END_TOKEN, id="only_end_token", ), pytest.param( True, ONLY_END_TOKEN, id="only_end_token_streaming", ), pytest.param( False, MULTIPLE_LINES, id="multiple_lines", ), pytest.param( True, MULTIPLE_LINES, id="multiple_lines_streaming", ), pytest.param( False, EMPTY, id="empty", ), pytest.param( True, EMPTY_STREAMING, id="empty_streaming", ), pytest.param( False, SPECIAL_CHARS, id="special_chars", ), pytest.param( True, SPECIAL_CHARS, id="special_chars_streaming", ), pytest.param( False, CODE_OUTPUT, id="code_output", ), pytest.param( True, CODE_OUTPUT, id="code_output_streaming", ), ] @pytest.mark.parametrize("streaming, param_dict", TEST_CASES) def test_reasoning( streaming: bool, param_dict: dict, minimax_m2_tokenizer, ): output = minimax_m2_tokenizer.tokenize(param_dict["output"]) # decode everything to tokens output_tokens: list[str] = [ minimax_m2_tokenizer.convert_tokens_to_string([token]) for token in output ] parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)( minimax_m2_tokenizer ) reasoning, content = run_reasoning_extraction( parser, output_tokens, streaming=streaming ) assert reasoning == 
param_dict["reasoning"] assert content == param_dict["content"] # Test is_reasoning_end output_ids = minimax_m2_tokenizer.convert_tokens_to_ids(output) is_reasoning_end = parser.is_reasoning_end(output_ids) assert is_reasoning_end == param_dict["is_reasoning_end"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_granite_reasoning_parser.py
tests/reasoning/test_granite_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from transformers import AutoTokenizer from tests.reasoning.utils import DeltaMessage, run_reasoning_extraction from vllm.reasoning import ReasoningParser, ReasoningParserManager parser_name = "granite" START_REASONING = "Here is my thought process:" START_RESPONSE = "Here is my response:" SIMPLE_REASONING = { "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}This is the rest", # noqa: E501 "reasoning": "This is a reasoning section", "content": "This is the rest", } COMPLETE_REASONING = { "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}", "reasoning": "This is a reasoning section", "content": None, } NO_REASONING = { "output": "This is content", "reasoning": None, "content": "This is content", } MULTIPLE_LINES = { "output": f"{START_REASONING}This\nThat{START_RESPONSE}This is the rest\nThat", "reasoning": "This\nThat", "content": "This is the rest\nThat", } REASONING_WITH_THINK = { "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}This is the rest", # noqa: E501 "reasoning": "This is a reasoning section", "content": "This is the rest", } COMPLETE_REASONING_WITH_THINK = { "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}", "reasoning": "This is a reasoning section", "content": None, } MULTIPLE_LINES_WITH_THINK = { "output": f"{START_REASONING}This\nThat{START_RESPONSE}This is the rest\nThat", "reasoning": "This\nThat", "content": "This is the rest\nThat", } TEST_CASES = [ pytest.param( False, SIMPLE_REASONING, id="simple_reasoning", ), pytest.param( False, COMPLETE_REASONING, id="complete_reasoning", ), pytest.param( False, NO_REASONING, id="no_reasoning", ), pytest.param( False, MULTIPLE_LINES, id="multiple_lines", ), pytest.param( False, REASONING_WITH_THINK, id="reasoning_with_think", ), pytest.param( False, COMPLETE_REASONING_WITH_THINK, 
id="complete_reasoning_with_think", ), pytest.param( False, MULTIPLE_LINES_WITH_THINK, id="multiple_lines_with_think", ), pytest.param( True, SIMPLE_REASONING, id="simple_reasoning_streaming", ), pytest.param( True, COMPLETE_REASONING, id="complete_reasoning_streaming", ), pytest.param( True, NO_REASONING, id="no_reasoning_streaming", ), pytest.param( True, MULTIPLE_LINES, id="multiple_lines_streaming", ), pytest.param( True, REASONING_WITH_THINK, id="reasoning_with_think_streaming", ), pytest.param( True, COMPLETE_REASONING_WITH_THINK, id="complete_reasoning_with_think_streaming", ), pytest.param( True, MULTIPLE_LINES_WITH_THINK, id="multiple_lines_with_think_streaming", ), ] # Global tokenizer initialization to avoid repeated loading tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") @pytest.mark.parametrize("streaming, param_dict", TEST_CASES) def test_reasoning( streaming: bool, param_dict: dict, ): output = tokenizer.tokenize(param_dict["output"]) # decode everything to tokens output_tokens: list[str] = [ tokenizer.convert_tokens_to_string([token]) for token in output ] parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)( tokenizer ) reasoning, content = run_reasoning_extraction( parser, output_tokens, streaming=streaming ) assert reasoning == param_dict["reasoning"] assert content == param_dict["content"] # Additional tests for verifying the correctness of granite streaming; this # is complicated because granite uses multiple tokens to indicate when thinking # is starting / when it's starting its response, so skipping special tokens # is awkward. 
### Handling the start of reasoning STREAMING_1 = { "previous_text": None, "current_text": "Here", "delta_text": "Here", "reasoning": None, "content": None, } # When we fail, we should give what was previously being silenced first STREAMING_2 = { "previous_text": "Here is my thought", "current_text": "Here is my thought failure", "delta_text": " failure", "reasoning": None, "content": "Here is my thought failure", } # But then after the first one, we should only add the delta text to content STREAMING_3 = { "previous_text": "Here wrong", "current_text": " words", "delta_text": " Here wrong words", "reasoning": None, "content": " words", } # But then after the first one, we should only add the delta text to content STREAMING_4 = { "previous_text": "Here is my thought", "current_text": "Here is my thought process:", "delta_text": " process:", "reasoning": None, "content": None, } # Reasoning started successfully; parse reasoning content STREAMING_5 = { "previous_text": "Here is my thought process:", "current_text": "Here is my thought process: foo", "delta_text": " foo", "reasoning": " foo", "content": None, } # Response special sequence has started, but not finished. STREAMING_6 = { "previous_text": "Here is my thought process: foo", "current_text": "Here is my thought process: foo Here is", "delta_text": " Here is", "reasoning": " ", "content": None, } # Response special sequence started, but was broken; the reasoning # content should be the content that was previously unused. 
STREAMING_7 = { "previous_text": "Here is my thought process: foo Here is", "current_text": "Here is my thought process: foo Here is Here", "delta_text": " Here", "reasoning": "Here is ", "content": None, } # Response special sequence is ongoing STREAMING_8 = { "previous_text": "Here is my thought process: foo Here is my response:", "current_text": "Here is my thought process: foo Here is my response: bar", "delta_text": " bar", "reasoning": None, "content": " bar", } # The delta text has everything; we should be able to correctly parse both STREAMING_9 = { "previous_text": None, "current_text": "Here is my thought process: foo Here is my response: bar", "delta_text": "Here is my thought process: foo Here is my response: bar", "reasoning": " foo ", "content": " bar", } ## The Response is ongoing, and the delta mixes reasoning content / content STREAMING_10 = { "previous_text": "Here is my thought process: foo", "current_text": "Here is my thought process: foo bar Here is my response: baz", "delta_text": " bar Here is my response: baz", "reasoning": " bar ", "content": " baz", } # The delta text starts a new substring that might be a response special seq STREAMING_11 = { "previous_text": "Here is my thought process: This is a reasoning section ", "current_text": "Here is my thought process: This is a reasoning section Here", "delta_text": "Here", "reasoning": None, "content": None, } # The delta text is finishing the response special seq STREAMING_12 = { "previous_text": "Here is my thought process: foo Here is my response", "current_text": "Here is my thought process: foo Here is my response:", "delta_text": ":", "reasoning": None, "content": None, } STREAMING_13 = { "previous_text": "Here is my thought process: foo Here", "current_text": "Here is my thought process: foo Here was", "delta_text": " was", "reasoning": "Here was", "content": None, } STREAMING_SUBCASES = [ pytest.param( STREAMING_1, id="Starting reasoning special sequence", ), pytest.param( 
STREAMING_2, id="Unexpected start reasoning sequence", ), pytest.param( STREAMING_3, id="Continuing unexpected start reasoning sequence", ), pytest.param( STREAMING_4, id="Only start reasoning sequence and nothing else", ), pytest.param( STREAMING_5, id="Reasoning content has started", ), pytest.param( STREAMING_6, id="Response special sequence has started", ), pytest.param( STREAMING_7, id="Response special sequence reset", ), pytest.param( STREAMING_8, id="Response text has started", ), pytest.param( STREAMING_9, id="Delta contains everything", ), pytest.param( STREAMING_10, id="Delta contains some reasoning and response", ), pytest.param( STREAMING_11, id="Delta starts response sequence", ), pytest.param( STREAMING_12, id="Delta finishes response sequence", ), pytest.param( STREAMING_13, id="Delta breaks potential responise sequence", ), ] @pytest.mark.parametrize("param_dict", STREAMING_SUBCASES) def test_streaming_subcases(param_dict): # Get all of the token IDs previous_token_ids = ( tokenizer.encode(param_dict["previous_text"]) if param_dict["previous_text"] is not None else [] ) current_token_ids = tokenizer.encode(param_dict["current_text"]) delta_token_ids = tokenizer.encode(param_dict["delta_text"]) parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)( tokenizer ) response = parser.extract_reasoning_streaming( previous_text=param_dict["previous_text"], current_text=param_dict["current_text"], delta_text=param_dict["delta_text"], previous_token_ids=previous_token_ids, current_token_ids=current_token_ids, delta_token_ids=delta_token_ids, ) # Streaming currently expects at least one of reasoning content / content, # so the response should return None in that case. if param_dict["reasoning"] is None and param_dict["content"] is None: assert response is None else: assert isinstance(response, DeltaMessage) assert param_dict["reasoning"] == response.reasoning assert param_dict["content"] == response.content
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_glm4_moe_reasoning_parser.py
tests/reasoning/test_glm4_moe_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Tests for the GLM-4.5 ("glm45") reasoning parser: extraction of
# reasoning/content around <think>...</think> and reasoning-end detection.

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "glm45"
start_token = "<think>"
end_token = "</think>"

REASONING_MODEL_NAME = "zai-org/GLM-4.5"


@pytest.fixture(scope="module")
def glm45_tokenizer():
    # Loaded once per module; tokenizer download/parse is expensive.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


# Each case dict: "output" is the raw model text fed to the parser,
# "reasoning"/"content" are the expected extraction results, and
# "is_reasoning_end" is the expected result of parser.is_reasoning_end().
WITH_THINK = {
    "output": "<think>This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
WITH_THINK_STREAM = {
    "output": "<think>This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
WITHOUT_THINK = {
    "output": "This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": False,
}
WITHOUT_THINK_STREAM = {
    "output": "This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": False,
}
COMPLETE_REASONING = {
    "output": "<think>This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": True,
}
MULTILINE_REASONING = {
    "output": "<think>This is a reasoning\nsection</think>This is the rest\nThat",
    "reasoning": "This is a reasoning\nsection",
    "content": "This is the rest\nThat",
    "is_reasoning_end": True,
}
# NOTE: expectations differ between streaming and non-streaming when only
# the open tag is present — non-streaming treats the text as content,
# streaming treats it as in-progress reasoning.
ONLY_OPEN_TAG = {
    "output": "<think>This is a reasoning section",
    "reasoning": None,
    "content": "<think>This is a reasoning section",
    "is_reasoning_end": False,
}
ONLY_OPEN_TAG_STREAM = {
    "output": "<think>This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": False,
}

TEST_CASES = [
    pytest.param(
        False,
        WITH_THINK,
        id="with_think",
    ),
    pytest.param(
        True,
        WITH_THINK_STREAM,
        id="with_think_stream",
    ),
    pytest.param(
        False,
        WITHOUT_THINK,
        id="without_think",
    ),
    pytest.param(
        True,
        WITHOUT_THINK_STREAM,
        id="without_think_stream",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_stream",
    ),
    pytest.param(
        False,
        MULTILINE_REASONING,
        id="multiline_reasoning",
    ),
    pytest.param(
        True,
        MULTILINE_REASONING,
        id="multiline_reasoning_stream",
    ),
    pytest.param(
        False,
        ONLY_OPEN_TAG,
        id="only_open_tag",
    ),
    pytest.param(
        True,
        ONLY_OPEN_TAG_STREAM,
        id="only_open_tag_stream",
    ),
]

# Full chat-formatted prompts used to verify is_reasoning_end() against
# realistic token histories (single- and multi-turn).
STILL_REASONING_PROMPT = """[gMASK]<sop><|system|>
You are a helpful assistant.<|user|>
What is the capital of France?<|assistant|>
<think>The user is asking for the capital of"""

DONE_REASONING_PROMPT = """[gMASK]<sop><|system|>
You are a helpful assistant.<|user|>
What is the capital of France?<|assistant|>
<think>The user is asking for the capital of France.</think>
The capital of France is Paris."""

MULTI_TURN_STILL_REASONING_PROMPT = """[gMASK]<sop><|system|>
You are a helpful assistant.<|user|>
What is the capital of France?<|assistant|>
<think></think>
The capital of France is Paris.<|user|>
What about Chile?<|assistant|>
<think>The user is asking for the capital of"""

MULTI_TURN_DONE_REASONING_PROMPT = """[gMASK]<sop><|system|>
You are a helpful assistant.<|user|>
What is the capital of France?<|assistant|>
<think></think>
The capital of France is Paris.<|user|>
What about Chile?<|assistant|>
<think>The user is asking for the capital of Chile.</think>
The capital of Chile is Santiago."""

REASONING_END_TEST_CASES = [
    pytest.param(STILL_REASONING_PROMPT, False, id="still_reasoning"),
    pytest.param(DONE_REASONING_PROMPT, True, id="done_reasoning"),
    pytest.param(
        MULTI_TURN_STILL_REASONING_PROMPT, False, id="multi_turn_still_reasoning"
    ),
    pytest.param(
        MULTI_TURN_DONE_REASONING_PROMPT, True, id="multi_turn_done_reasoning"
    ),
]


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    glm45_tokenizer,
):
    """Check reasoning/content extraction and is_reasoning_end for one case."""
    output = glm45_tokenizer.tokenize(param_dict["output"])
    # Re-decode token-by-token so streaming extraction sees realistic deltas.
    output_tokens: list[str] = [
        glm45_tokenizer.convert_tokens_to_string([token]) for token in output
    ]
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        glm45_tokenizer
    )

    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]

    # is_reasoning_end operates on token ids, not strings.
    output_ids = glm45_tokenizer.convert_tokens_to_ids(output)
    is_reasoning_end = parser.is_reasoning_end(output_ids)
    assert is_reasoning_end == param_dict["is_reasoning_end"]


@pytest.mark.parametrize("prompt, is_reasoning_end", REASONING_END_TEST_CASES)
def test_is_reasoning_end_full_prompt(
    prompt: str, is_reasoning_end: bool, glm45_tokenizer
):
    """Check is_reasoning_end against full chat-formatted prompts."""
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        glm45_tokenizer
    )
    tokens = glm45_tokenizer.tokenize(prompt)
    token_ids = glm45_tokenizer.convert_tokens_to_ids(tokens)
    check_is_reasoning_end = parser.is_reasoning_end(token_ids)
    assert check_is_reasoning_end == is_reasoning_end
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_olmo3_reasoning_parser.py
tests/reasoning/test_olmo3_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Tests for the Olmo3 ("olmo3") reasoning parser, with emphasis on newline
# handling around the <think>...</think> markers.

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "olmo3"

START_REASONING = "<think>"
END_REASONING = "</think>"

# Each case dict: "output" is the raw model text, "reasoning"/"content" are
# the expected extraction results.
NO_REASONING = {
    "output": f"{START_REASONING}{END_REASONING}No thoughts, head empty!",
    "reasoning": None,
    "content": "No thoughts, head empty!",
}
NO_REASONING_WITH_NEWLINE = {
    "output": f"{START_REASONING}\n{END_REASONING}\n\nNo thoughts, head empty!",
    "reasoning": "\n",
    "content": "\n\nNo thoughts, head empty!",
}
SIMPLE_REASONING = {
    "output": f"{START_REASONING}This is a reasoning section{END_REASONING}This is the rest",  # noqa: E501
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
SIMPLE_REASONING_WITH_NEWLINE = {
    "output": f"{START_REASONING} Look!\n\nI'm thinking...{END_REASONING}\nThis is the rest",  # noqa: E501
    "reasoning": " Look!\n\nI'm thinking...",
    "content": "\nThis is the rest",
}
SIMPLE_REASONING_WITH_MULTIPLE_NEWLINES = {
    "output": f"{START_REASONING}\nLook!\nI'm thinking...\n\n{END_REASONING}\n\n\nThis is the rest",  # noqa: E501
    "reasoning": "\nLook!\nI'm thinking...\n\n",
    "content": "\n\n\nThis is the rest",
}
# Model emits only the end marker (no <think>): everything before it is
# reasoning — possibly empty.
NO_REASONING_ONLY_END_THINK = {
    "output": f"{END_REASONING}\n\nNo thoughts, head empty!",
    "reasoning": None,
    "content": "\n\nNo thoughts, head empty!",
}
REASONING_ONLY_END_THINK = {
    "output": f"The user is asking me not to think.{END_REASONING}No thoughts!",
    "reasoning": "The user is asking me not to think.",
    "content": "No thoughts!",
}

TEST_CASES = [
    pytest.param(
        False,  # not streaming
        NO_REASONING,
        id="no_reasoning",
    ),
    pytest.param(
        False,  # not streaming
        NO_REASONING_WITH_NEWLINE,
        id="no_reasoning_with_newline",
    ),
    pytest.param(
        False,  # not streaming
        SIMPLE_REASONING,
        id="simple_reasoning",
    ),
    pytest.param(
        False,  # not streaming
        SIMPLE_REASONING_WITH_NEWLINE,
        id="simple_reasoning_with_newline",
    ),
    pytest.param(
        True,  # enable streaming
        SIMPLE_REASONING_WITH_MULTIPLE_NEWLINES,
        id="simple_reasoning_with_multiple_newlines",
    ),
    pytest.param(
        False,  # not streaming
        NO_REASONING_ONLY_END_THINK,
        id="no_reasoning_only_end_think",
    ),
    pytest.param(
        False,  # not streaming
        REASONING_ONLY_END_THINK,
        id="yes_reasoning_only_end_think",
    ),
    pytest.param(
        True,  # enable streaming
        NO_REASONING,
        id="no_reasoning_streaming",
    ),
    pytest.param(
        True,  # enable streaming
        NO_REASONING_WITH_NEWLINE,
        id="no_reasoning_with_newline_streaming",
    ),
    pytest.param(
        True,  # enable streaming
        SIMPLE_REASONING,
        id="simple_reasoning_streaming",
    ),
    pytest.param(
        True,  # enable streaming
        SIMPLE_REASONING_WITH_NEWLINE,
        id="simple_reasoning_with_newline_streaming",
    ),
    pytest.param(
        True,  # enable streaming
        SIMPLE_REASONING_WITH_MULTIPLE_NEWLINES,
        id="simple_reasoning_with_multiple_newlines_streaming",
    ),
    pytest.param(
        True,  # enable streaming
        NO_REASONING_ONLY_END_THINK,
        id="no_reasoning_only_end_think_streaming",
    ),
    pytest.param(
        True,  # enable streaming
        REASONING_ONLY_END_THINK,
        id="yes_reasoning_only_end_think_streaming",
    ),
]

# Global tokenizer initialization to avoid repeated loading
# NOTE(review): this runs at import/collection time; a module-scoped pytest
# fixture (as the sibling parser tests use) would defer the load — confirm
# before changing, since it alters test signatures.
tokenizer = AutoTokenizer.from_pretrained("allenai/dolma2-tokenizer")


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict[str, str],
):
    """Check olmo3 reasoning/content extraction for one parametrized case."""
    output = tokenizer.tokenize(param_dict["output"])
    # decode everything to tokens
    model_output: list[str] = [
        tokenizer.convert_tokens_to_string([token]) for token in output
    ]

    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser: ReasoningParser = parser_cls(tokenizer)

    reasoning, content = run_reasoning_extraction(
        reasoning_parser=parser, model_output=model_output, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_holo2_reasoning_parser.py
tests/reasoning/test_holo2_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Tests for the Holo2 reasoning parser, which delegates to DeepSeekR1 or an
# identity parser depending on the `thinking` chat-template kwarg.

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager
from vllm.reasoning.deepseek_r1_reasoning_parser import DeepSeekR1ReasoningParser
from vllm.reasoning.holo2_reasoning_parser import Holo2ReasoningParser
from vllm.reasoning.identity_reasoning_parser import IdentityReasoningParser

REASONING_MODEL_NAME = "HCompany/Holo2-4B"


@pytest.fixture(scope="module")
def tokenizer():
    # Loaded once per module; tokenizer download/parse is expensive.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


@pytest.mark.parametrize(
    "thinking,expected_parser_type",
    [
        (True, DeepSeekR1ReasoningParser),
        (False, IdentityReasoningParser),
    ],
)
def test_parser_selection(tokenizer, thinking, expected_parser_type):
    """`thinking` kwarg selects the underlying delegate parser."""
    parser = Holo2ReasoningParser(
        tokenizer,
        chat_template_kwargs={
            "thinking": thinking,
        },
    )
    assert isinstance(parser._parser, expected_parser_type)


def test_holo2_default_parser_is_deepseekr1(tokenizer):
    """Without chat_template_kwargs the delegate defaults to DeepSeekR1."""
    parser = Holo2ReasoningParser(tokenizer)
    assert isinstance(parser._parser, DeepSeekR1ReasoningParser)


def test_holo2_supports_structured_output(tokenizer):
    # Structured output manager uses the reasoning parser to check if the
    # reasoning content is ended before applying the grammar. The main function
    # used is is_reasoning_end. This test checks if the parser is able to
    # correctly identify the end of the reasoning content.
    # important to not pass chat_template_kwargs here as it is done in the
    # StructuredOutputManager
    parser = Holo2ReasoningParser(tokenizer)
    end_token_id = tokenizer.encode("</think>", add_special_tokens=False)[0]
    assert parser.is_reasoning_end([1, 2, 4, end_token_id])
    assert not parser.is_reasoning_end([1, 2, 4])
    # End token followed by more tokens still counts as "reasoning ended".
    assert parser.is_reasoning_end([1, 2, 4, end_token_id, 5])


# thinking is True, non-streaming
WITH_THINK = {
    "output": "This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
# thinking is True, streaming
WITH_THINK_STREAM = {
    "output": "This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
# thinking is False, non-streaming
THINKING_DISABLED = {
    "output": "This is the rest",
    "reasoning": None,
    "content": "This is the rest",
}
# thinking is False, streaming
THINKING_DISABLED_STREAM = {
    "output": "This is the rest",
    "reasoning": None,
    "content": "This is the rest",
}
# thinking is False but the model output </think>, non-streaming
THINKING_DISABLED_WITH_CLOSE_TAG = {
    "output": "</think>This is the rest",
    "reasoning": None,
    "content": "</think>This is the rest",
}
# thinking is False but the model output </think>, streaming
THINKING_DISABLED_WITH_CLOSE_TAG_STREAM = {
    "output": "some text</think>This is the rest",
    "reasoning": None,
    "content": "some text</think>This is the rest",
}
COMPLETE_REASONING = {
    "output": "This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
}

# Params: (streaming, case dict, chat_template_kwargs passed to the parser).
TEST_CASES = [
    pytest.param(
        False,
        WITH_THINK,
        None,
        id="with_think",
    ),
    pytest.param(
        True,
        WITH_THINK_STREAM,
        None,
        id="with_think_stream",
    ),
    pytest.param(
        False,
        WITH_THINK,
        {"thinking": True},
        id="with_think_enabled",
    ),
    pytest.param(
        True,
        WITH_THINK_STREAM,
        {"thinking": True},
        id="with_think_stream_enabled",
    ),
    pytest.param(
        False,
        THINKING_DISABLED,
        {"thinking": False},
        id="thinking_disabled",
    ),
    pytest.param(
        True,
        THINKING_DISABLED_STREAM,
        {"thinking": False},
        id="thinking_disabled_stream",
    ),
    pytest.param(
        False,
        THINKING_DISABLED_WITH_CLOSE_TAG,
        {"thinking": False},
        id="thinking_disabled_with_close_tag",
    ),
    pytest.param(
        True,
        THINKING_DISABLED_WITH_CLOSE_TAG_STREAM,
        {"thinking": False},
        id="thinking_disabled_with_close_tag_stream",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        None,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        None,
        id="complete_reasoning_stream",
    ),
]


@pytest.mark.parametrize("streaming, param_dict, chat_template_kwargs", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    chat_template_kwargs: dict | None,
    tokenizer,
):
    """Check holo2 extraction for one case under given chat_template_kwargs."""
    output = tokenizer.tokenize(param_dict["output"])
    # Re-decode token-by-token so streaming extraction sees realistic deltas.
    output_tokens: list[str] = [
        tokenizer.convert_tokens_to_string([token]) for token in output
    ]
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser("holo2")(
        tokenizer,
        chat_template_kwargs=chat_template_kwargs,
    )

    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_qwen3_reasoning_parser.py
tests/reasoning/test_qwen3_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Tests for the Qwen3 ("qwen3") reasoning parser: extraction of reasoning
# and content around <think>...</think> markers.

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "qwen3"
start_token = "<think>"
end_token = "</think>"

REASONING_MODEL_NAME = "Qwen/Qwen3-0.6B"


@pytest.fixture(scope="module")
def qwen3_tokenizer():
    # Loaded once per module; tokenizer download/parse is expensive.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


# With <think></think>, non-streaming
WITH_THINK = {
    "output": "<think>This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
# With <think></think>, streaming
WITH_THINK_STREAM = {
    "output": "<think>This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
# Without <think></think>, non-streaming
WITHOUT_THINK = {
    "output": "This is the rest",
    "reasoning": None,
    "content": "This is the rest",
}
# Without <think></think>, streaming
WITHOUT_THINK_STREAM = {
    "output": "This is the rest",
    "reasoning": None,
    "content": "This is the rest",
}
COMPLETE_REASONING = {
    "output": "<think>This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
}
MULTILINE_REASONING = {
    "output": "<think>This is a reasoning\nsection</think>This is the rest\nThat",
    "reasoning": "This is a reasoning\nsection",
    "content": "This is the rest\nThat",
}
# NOTE: expectations differ between streaming and non-streaming when only
# the open tag is present — non-streaming treats the text as content,
# streaming treats it as in-progress reasoning.
ONLY_OPEN_TAG = {
    "output": "<think>This is a reasoning section",
    "reasoning": None,
    "content": "<think>This is a reasoning section",
}
ONLY_OPEN_TAG_STREAM = {
    "output": "<think>This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
}

TEST_CASES = [
    pytest.param(
        False,
        WITH_THINK,
        id="with_think",
    ),
    pytest.param(
        True,
        WITH_THINK_STREAM,
        id="with_think_stream",
    ),
    pytest.param(
        False,
        WITHOUT_THINK,
        id="without_think",
    ),
    pytest.param(
        True,
        WITHOUT_THINK_STREAM,
        id="without_think_stream",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_stream",
    ),
    pytest.param(
        False,
        MULTILINE_REASONING,
        id="multiline_reasoning",
    ),
    pytest.param(
        True,
        MULTILINE_REASONING,
        id="multiline_reasoning_stream",
    ),
    pytest.param(
        False,
        ONLY_OPEN_TAG,
        id="only_open_tag",
    ),
    pytest.param(
        True,
        ONLY_OPEN_TAG_STREAM,
        id="only_open_tag_stream",
    ),
]


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    qwen3_tokenizer,
):
    """Check qwen3 reasoning/content extraction for one parametrized case."""
    output = qwen3_tokenizer.tokenize(param_dict["output"])
    # Re-decode token-by-token so streaming extraction sees realistic deltas.
    output_tokens: list[str] = [
        qwen3_tokenizer.convert_tokens_to_string([token]) for token in output
    ]
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        qwen3_tokenizer
    )

    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_minimax_m2_reasoning_parser.py
tests/reasoning/test_minimax_m2_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Tests for the MiniMax M2 ("minimax_m2") reasoning parser. The model emits
# only an end-of-reasoning marker (</think>), never a start marker.

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "minimax_m2"
end_token = "</think>"

# MiniMax M2 model path
REASONING_MODEL_NAME = "MiniMaxAI/MiniMax-M2"


@pytest.fixture(scope="module")
def minimax_m2_tokenizer():
    # Loaded once per module; tokenizer download/parse is expensive.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


# =============================================================================
# MiniMax M2 specific behavior:
# - Model does NOT generate <think> start token
# - Model only generates </think> end token
# - All content before </think> is reasoning
# - All content after </think> is the actual response (content)
# =============================================================================

# Each case dict: "output" is the raw model text, "reasoning"/"content" are
# the expected extraction results, "is_reasoning_end" the expected result of
# parser.is_reasoning_end().

# Case: reasoning + end token + content (typical case)
SIMPLE_REASONING = {
    "output": "This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}

# Case: reasoning + end token only (no content after)
COMPLETE_REASONING = {
    "output": "This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": True,
}

# Case: no end token yet (streaming in progress, all is reasoning)
NO_END_TOKEN = {
    "output": "This is reasoning in progress",
    "reasoning": "This is reasoning in progress",
    "content": None,
    "is_reasoning_end": False,
}

# Case: multiple lines of reasoning
MULTIPLE_LINES = {
    "output": "First line\nSecond line</think>Response first line\nResponse second",
    "reasoning": "First line\nSecond line",
    "content": "Response first line\nResponse second",
    "is_reasoning_end": True,
}

# Case: only end token (empty reasoning, immediate response)
SHORTEST_REASONING_NO_STREAMING = {
    "output": "</think>This is the response",
    "reasoning": "",
    "content": "This is the response",
    "is_reasoning_end": True,
}

# Case: only end token streaming (reasoning is None because it's just the token)
SHORTEST_REASONING_STREAMING = {
    "output": "</think>This is the response",
    "reasoning": None,
    "content": "This is the response",
    "is_reasoning_end": True,
}

# Case: empty output
EMPTY = {
    "output": "",
    "reasoning": "",
    "content": None,
    "is_reasoning_end": False,
}

# Case: empty streaming
EMPTY_STREAMING = {
    "output": "",
    "reasoning": None,
    "content": None,
    "is_reasoning_end": False,
}

# Case: long reasoning with special characters
SPECIAL_CHARS = {
    "output": "Let me think... 1+1=2, right?</think>Yes, 1+1=2.",
    "reasoning": "Let me think... 1+1=2, right?",
    "content": "Yes, 1+1=2.",
    "is_reasoning_end": True,
}

# Case: reasoning with code blocks
CODE_IN_REASONING = {
    "output": "```python\nprint('hello')\n```</think>Here is the code.",
    "reasoning": "```python\nprint('hello')\n```",
    "content": "Here is the code.",
    "is_reasoning_end": True,
}

TEST_CASES = [
    # Core cases: no start token (MiniMax M2 actual behavior)
    pytest.param(
        False,
        SIMPLE_REASONING,
        id="simple_reasoning",
    ),
    pytest.param(
        True,
        SIMPLE_REASONING,
        id="simple_reasoning_streaming",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_streaming",
    ),
    pytest.param(
        False,
        NO_END_TOKEN,
        id="no_end_token",
    ),
    pytest.param(
        True,
        NO_END_TOKEN,
        id="no_end_token_streaming",
    ),
    pytest.param(
        False,
        MULTIPLE_LINES,
        id="multiple_lines",
    ),
    pytest.param(
        True,
        MULTIPLE_LINES,
        id="multiple_lines_streaming",
    ),
    pytest.param(
        False,
        SHORTEST_REASONING_NO_STREAMING,
        id="shortest_reasoning",
    ),
    pytest.param(
        True,
        SHORTEST_REASONING_STREAMING,
        id="shortest_reasoning_streaming",
    ),
    pytest.param(
        False,
        EMPTY,
        id="empty",
    ),
    pytest.param(
        True,
        EMPTY_STREAMING,
        id="empty_streaming",
    ),
    pytest.param(
        False,
        SPECIAL_CHARS,
        id="special_chars",
    ),
    pytest.param(
        True,
        SPECIAL_CHARS,
        id="special_chars_streaming",
    ),
    pytest.param(
        False,
        CODE_IN_REASONING,
        id="code_in_reasoning",
    ),
    pytest.param(
        True,
        CODE_IN_REASONING,
        id="code_in_reasoning_streaming",
    ),
]


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    minimax_m2_tokenizer,
):
    """Check extraction, is_reasoning_end, and extract_content_ids for a case."""
    output = minimax_m2_tokenizer.tokenize(param_dict["output"])
    # decode everything to tokens
    output_tokens: list[str] = [
        minimax_m2_tokenizer.convert_tokens_to_string([token]) for token in output
    ]
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        minimax_m2_tokenizer
    )

    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]

    # Test is_reasoning_end — operates on token ids, not strings.
    output_ids = minimax_m2_tokenizer.convert_tokens_to_ids(output)
    is_reasoning_end = parser.is_reasoning_end(output_ids)
    assert is_reasoning_end == param_dict["is_reasoning_end"]

    # Test extract_content
    if param_dict["content"] is not None:
        content_ids = parser.extract_content_ids(output_ids)
        assert content_ids == minimax_m2_tokenizer.convert_tokens_to_ids(
            minimax_m2_tokenizer.tokenize(param_dict["content"])
        )
    else:
        # Fix: extract_content_ids takes token ids; the original passed the
        # token-string list (`output`) here, so the no-content branch was not
        # exercising the parser with valid input.
        content_ids = parser.extract_content_ids(output_ids)
        assert content_ids == []
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_deepseekr1_reasoning_parser.py
tests/reasoning/test_deepseekr1_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Tests for the DeepSeek-R1 ("deepseek_r1") reasoning parser: extraction of
# reasoning/content, reasoning-end detection, and content-id extraction.

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "deepseek_r1"
start_token = "<think>"
end_token = "</think>"

REASONING_MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"


@pytest.fixture(scope="module")
def deepseek_r1_qwen_tokenizer():
    # Loaded once per module; tokenizer download/parse is expensive.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


# Each case dict: "output" is the raw model text, "reasoning"/"content" are
# the expected extraction results, "is_reasoning_end" the expected result of
# parser.is_reasoning_end().
SIMPLE_REASONING = {
    "output": "This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
COMPLETE_REASONING = {
    "output": "This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": True,
}
NO_CONTENT = {
    "output": "This is content",
    "reasoning": "This is content",
    "content": None,
    "is_reasoning_end": False,
}
NO_REASONING_STREAMING = {
    "output": "This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": False,
}
MULTIPLE_LINES = {
    "output": "This\nThat</think>This is the rest\nThat",
    "reasoning": "This\nThat",
    "content": "This is the rest\nThat",
    "is_reasoning_end": True,
}
SHORTEST_REASONING_NO_STREAMING = {
    "output": "</think>This is the rest",
    "reasoning": "",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
SHORTEST_REASONING = {
    "output": "</think>This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": True,
}
REASONING_WITH_THINK = {
    "output": "<think>This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
COMPLETE_REASONING_WITH_THINK = {
    "output": "<think>This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": True,
}
MULTIPLE_LINES_WITH_THINK = {
    "output": "<think>This\nThat</think>This is the rest\nThat",
    "reasoning": "This\nThat",
    "content": "This is the rest\nThat",
    "is_reasoning_end": True,
}
SHORTEST_REASONING_NO_STREAMING_WITH_THINK = {
    "output": "</think>This is the rest",
    "reasoning": "",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
SHORTEST_REASONING_WITH_THINK = {
    "output": "</think>This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": True,
}
THINK_NO_END = {
    "output": "<think>This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": False,
}
EMPTY = {
    "output": "",
    "reasoning": "",
    "content": None,
    "is_reasoning_end": False,
}
EMPTY_STREAMING = {
    "output": "",
    "reasoning": None,
    "content": None,
    "is_reasoning_end": False,
}
NEW_LINE = {
    "output": "\n<think>This is a reasoning section</think>\nThis is the rest",
    "reasoning": "This is a reasoning section",
    "content": "\nThis is the rest",
    "is_reasoning_end": True,
}
# Streaming cannot handle new lines at the beginning of the output
# because we need to support <think>...</think> and </think>...
# We cannot know if the text before <think> is reasoning content
# or not.
NEW_LINE_STREAMING = {
    "output": "\n<think>This is a reasoning section</think>\nThis is the rest",
    "reasoning": "\nThis is a reasoning section",
    "content": "\nThis is the rest",
    "is_reasoning_end": True,
}

TEST_CASES = [
    pytest.param(
        False,
        SIMPLE_REASONING,
        id="simple_reasoning",
    ),
    pytest.param(
        True,
        SIMPLE_REASONING,
        id="simple_reasoning_streaming",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_streaming",
    ),
    pytest.param(
        False,
        NO_CONTENT,
        id="no_content_token",
    ),
    pytest.param(
        True,
        NO_REASONING_STREAMING,
        id="no_reasoning_token_streaming",
    ),
    pytest.param(
        False,
        MULTIPLE_LINES,
        id="multiple_lines",
    ),
    pytest.param(
        True,
        MULTIPLE_LINES,
        id="multiple_lines_streaming",
    ),
    pytest.param(
        True,
        SHORTEST_REASONING,
        id="shortest",
    ),
    pytest.param(
        False,
        SHORTEST_REASONING_NO_STREAMING,
        id="shortest_streaming",
    ),
    pytest.param(
        False,
        REASONING_WITH_THINK,
        id="reasoning_with_think",
    ),
    pytest.param(
        True,
        REASONING_WITH_THINK,
        id="reasoning_with_think_streaming",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING_WITH_THINK,
        id="complete_reasoning_with_think",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING_WITH_THINK,
        id="complete_reasoning_with_think_streaming",
    ),
    pytest.param(
        False,
        MULTIPLE_LINES_WITH_THINK,
        id="multiple_lines_with_think",
    ),
    pytest.param(
        True,
        MULTIPLE_LINES_WITH_THINK,
        id="multiple_lines_with_think_streaming",
    ),
    pytest.param(
        False,
        SHORTEST_REASONING_NO_STREAMING_WITH_THINK,
        id="shortest_with_think",
    ),
    pytest.param(
        True,
        SHORTEST_REASONING_WITH_THINK,
        id="shortest_with_think_streaming",
    ),
    pytest.param(
        False,
        THINK_NO_END,
        id="think_no_end",
    ),
    pytest.param(
        True,
        THINK_NO_END,
        id="think_no_end_streaming",
    ),
    pytest.param(
        False,
        EMPTY,
        id="empty",
    ),
    pytest.param(
        True,
        EMPTY_STREAMING,
        id="empty_streaming",
    ),
    pytest.param(
        False,
        NEW_LINE,
        id="new_line",
    ),
    pytest.param(
        True,
        NEW_LINE_STREAMING,
        id="new_line_streaming",
    ),
]


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    deepseek_r1_qwen_tokenizer,
):
    """Check extraction, is_reasoning_end, and extract_content_ids for a case."""
    output = deepseek_r1_qwen_tokenizer.tokenize(param_dict["output"])
    # decode everything to tokens
    output_tokens: list[str] = [
        deepseek_r1_qwen_tokenizer.convert_tokens_to_string([token]) for token in output
    ]
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        deepseek_r1_qwen_tokenizer
    )

    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]

    # Test is_reasoning_end — operates on token ids, not strings.
    output_ids = deepseek_r1_qwen_tokenizer.convert_tokens_to_ids(output)
    is_reasoning_end = parser.is_reasoning_end(output_ids)
    assert is_reasoning_end == param_dict["is_reasoning_end"]

    # Test extract_content
    if param_dict["content"] is not None:
        content_ids = parser.extract_content_ids(output_ids)
        assert content_ids == deepseek_r1_qwen_tokenizer.convert_tokens_to_ids(
            deepseek_r1_qwen_tokenizer.tokenize(param_dict["content"])
        )
    else:
        # Fix: extract_content_ids takes token ids; the original passed the
        # token-string list (`output`) here, so the no-content branch was not
        # exercising the parser with valid input.
        content_ids = parser.extract_content_ids(output_ids)
        assert content_ids == []
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_gptoss_reasoning_parser.py
tests/reasoning/test_gptoss_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Tests for the gpt-oss reasoning parser's is_reasoning_end(), which must
# recognize the Harmony channel markers (analysis vs final) in a token stream.

import pytest
from transformers import AutoTokenizer

from vllm.reasoning import ReasoningParser
from vllm.reasoning.gptoss_reasoning_parser import GptOssReasoningParser

REASONING_MODEL_NAME = "openai/gpt-oss-120b"


@pytest.fixture(scope="module")
def gpt_oss_tokenizer():
    # Loaded once per module; tokenizer download/parse is expensive.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


# Harmony-format marker fragments used to assemble the test outputs below.
USER_MESSAGE_START = "<|start|>user<|message|>"
REASONING_SECTION_START = "<|end|><|start|>assistant<|channel|>analysis<|message|>"
ASSISTANT_CONTENT_START_PREFIX = "<|end|><|start|>assistant<|channel|>final"
ASSISTANT_CONTENT_START_SUFFIX = "<|message|>"
ASSISTANT_CONTENT_START = (
    ASSISTANT_CONTENT_START_PREFIX + ASSISTANT_CONTENT_START_SUFFIX
)

# Each case dict: "output" is the assembled token text, "is_reasoning_end"
# the expected result of parser.is_reasoning_end().
BASIC_CONTENT = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START
    + "This is the rest",
    "is_reasoning_end": True,
}

BASIC_REASONING_ONLY = {
    "output": REASONING_SECTION_START + "This is reasoning" + "<|end|>",
    "is_reasoning_end": False,
}

BASIC_NO_REASONING_NO_ASSISTANT = {
    "output": USER_MESSAGE_START + "This is a user message",
    "is_reasoning_end": False,
}

# Edge-case where the model omits the assistant tag entirely.
BASIC_NO_REASONING_ASSISTANT = {
    "output": USER_MESSAGE_START + "This is a user message<|end|><|channel|>final",
    "is_reasoning_end": True,
}

# The "final" channel header may carry extra tokens (e.g. <|constrain|>)
# between the channel name and <|message|>; reasoning only ends once the
# <|message|> suffix arrives.
COMPLEX_CONTENT_INCOMPLETE_PREFIX_ONLY = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX,
    "is_reasoning_end": False,
}

COMPLEX_CONTENT_SUFFIX_ONLY = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_SUFFIX,
    "is_reasoning_end": False,
}

COMPLEX_CONTENT_1_NO_SUFFIX = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|> JSON ",
    "is_reasoning_end": False,
}

COMPLEX_CONTENT_1 = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|> JSON "
    + ASSISTANT_CONTENT_START_SUFFIX,
    "is_reasoning_end": True,
}

COMPLEX_CONTENT_1_WITH_CONTENT = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|> JSON "
    + ASSISTANT_CONTENT_START_SUFFIX
    + "This is the rest",
    "is_reasoning_end": True,
}

COMPLEX_CONTENT_2 = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|>ReplyAction "
    + ASSISTANT_CONTENT_START_SUFFIX
    + "This is the rest",
    "is_reasoning_end": True,
}

TEST_CASES = [
    BASIC_CONTENT,
    BASIC_REASONING_ONLY,
    COMPLEX_CONTENT_INCOMPLETE_PREFIX_ONLY,
    COMPLEX_CONTENT_SUFFIX_ONLY,
    COMPLEX_CONTENT_1_NO_SUFFIX,
    COMPLEX_CONTENT_1,
    COMPLEX_CONTENT_1_WITH_CONTENT,
    COMPLEX_CONTENT_2,
]


@pytest.mark.parametrize(
    "output, is_reasoning_end",
    [(t["output"], t["is_reasoning_end"]) for t in TEST_CASES],
)
def test_gptoss_is_reasoning_end(
    output,
    is_reasoning_end,
    gpt_oss_tokenizer,
):
    """Check is_reasoning_end over assembled Harmony-format token streams."""
    output = gpt_oss_tokenizer.tokenize(output)
    parser: ReasoningParser = GptOssReasoningParser(gpt_oss_tokenizer)

    # Test is_reasoning_end — operates on token ids, not strings.
    output_ids = gpt_oss_tokenizer.convert_tokens_to_ids(output)
    actual_is_reasoning_end = parser.is_reasoning_end(output_ids)
    assert is_reasoning_end == actual_is_reasoning_end
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_seedoss_reasoning_parser.py
tests/reasoning/test_seedoss_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Unit tests for the SeedOSS reasoning parser.

SeedOSS wraps chain-of-thought in ``<seed:think>`` ... ``</seed:think>``;
in typical output only the end token appears, so several cases below
exercise end-token-only extraction.
"""

from typing import Any, cast

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "seed_oss"
start_token = "<seed:think>"
end_token = "</seed:think>"

# Use a test model that contains our custom tokens
REASONING_MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"


@pytest.fixture(scope="module")
def seedoss_tokenizer():
    """Module-scoped tokenizer so the slow load happens once per module."""
    tokenizer = AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)
    # Add custom SeedOSS tokens if they don't exist
    if start_token not in tokenizer.get_vocab():
        tokenizer.add_tokens([start_token, end_token])
    return tokenizer


# Each fixture dict maps a raw model output to the expected parse result.
SIMPLE_REASONING: dict[str, Any] = {
    "output": "This is a reasoning section</seed:think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
COMPLETE_REASONING: dict[str, Any] = {
    "output": "This is a reasoning section</seed:think>",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": True,
}
NO_CONTENT: dict[str, Any] = {
    "output": "This is content",
    "reasoning": "This is content",
    "content": None,
    "is_reasoning_end": False,
}
# NOTE(review): not referenced by any test below — confirm whether a
# streaming-specific test was intended for this case.
NO_REASONING_STREAMING: dict[str, Any] = {
    "output": "This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": False,
}
MULTIPLE_LINES: dict[str, Any] = {
    "output": "This\nThat</seed:think>This is the rest\nThat",
    "reasoning": "This\nThat",
    "content": "This is the rest\nThat",
    "is_reasoning_end": True,
}
WITH_START_TOKEN: dict[str, Any] = {
    "output": ("<seed:think>This is a reasoning section</seed:think>This is the rest"),
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
ONLY_END_TOKEN: dict[str, Any] = {
    "output": "Some reasoning</seed:think>This is the rest",
    "reasoning": "Some reasoning",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
NO_TOKENS: dict[str, Any] = {
    "output": "This is just content without any reasoning tokens",
    "reasoning": "This is just content without any reasoning tokens",
    "content": None,
    "is_reasoning_end": False,
}


def test_seedoss_reasoning_parser_creation(seedoss_tokenizer):
    """Test that the SeedOSS reasoning parser can be created and registered."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    assert isinstance(parser, ReasoningParser)
    assert parser.start_token == start_token
    assert parser.end_token == end_token


@pytest.mark.parametrize("streaming", [True, False])
def test_simple_reasoning(seedoss_tokenizer, streaming):
    """Test basic reasoning extraction with both tokens."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    reasoning, content = run_reasoning_extraction(
        parser, [cast(str, SIMPLE_REASONING["output"])], streaming=streaming
    )
    assert reasoning == SIMPLE_REASONING["reasoning"]
    assert content == SIMPLE_REASONING["content"]


@pytest.mark.parametrize("streaming", [True, False])
def test_complete_reasoning(seedoss_tokenizer, streaming):
    """Test reasoning extraction when there's no content after reasoning."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    reasoning, content = run_reasoning_extraction(
        parser, [cast(str, COMPLETE_REASONING["output"])], streaming=streaming
    )
    assert reasoning == COMPLETE_REASONING["reasoning"]
    assert content == COMPLETE_REASONING["content"]


@pytest.mark.parametrize("streaming", [True, False])
def test_no_content(seedoss_tokenizer, streaming):
    """Test when there's no end token - everything is reasoning content."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    reasoning, content = run_reasoning_extraction(
        parser, [cast(str, NO_CONTENT["output"])], streaming=streaming
    )
    assert reasoning == NO_CONTENT["reasoning"]
    assert content == NO_CONTENT["content"]


@pytest.mark.parametrize("streaming", [True, False])
def test_multiple_lines(seedoss_tokenizer, streaming):
    """Test reasoning extraction with multiline content."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    reasoning, content = run_reasoning_extraction(
        parser, [cast(str, MULTIPLE_LINES["output"])], streaming=streaming
    )
    assert reasoning == MULTIPLE_LINES["reasoning"]
    assert content == MULTIPLE_LINES["content"]


@pytest.mark.parametrize("streaming", [True, False])
def test_with_start_token(seedoss_tokenizer, streaming):
    """Test reasoning extraction with both start and end tokens."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    reasoning, content = run_reasoning_extraction(
        parser, [cast(str, WITH_START_TOKEN["output"])], streaming=streaming
    )
    assert reasoning == WITH_START_TOKEN["reasoning"]
    assert content == WITH_START_TOKEN["content"]


@pytest.mark.parametrize("streaming", [True, False])
def test_only_end_token(seedoss_tokenizer, streaming):
    """
    Test reasoning extraction with only end token (SeedOSS typical behavior).
    """
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    reasoning, content = run_reasoning_extraction(
        parser, [cast(str, ONLY_END_TOKEN["output"])], streaming=streaming
    )
    assert reasoning == ONLY_END_TOKEN["reasoning"]
    assert content == ONLY_END_TOKEN["content"]


@pytest.mark.parametrize("streaming", [True, False])
def test_no_tokens(seedoss_tokenizer, streaming):
    """Test when there are no reasoning tokens at all."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    reasoning, content = run_reasoning_extraction(
        parser, [cast(str, NO_TOKENS["output"])], streaming=streaming
    )
    assert reasoning == NO_TOKENS["reasoning"]
    assert content == NO_TOKENS["content"]


def test_is_reasoning_end(seedoss_tokenizer):
    """Test the is_reasoning_end method."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    # Test with end token present
    end_token_id = parser.end_token_id
    assert parser.is_reasoning_end([1, 2, end_token_id, 4]) is True
    # Test without end token
    assert parser.is_reasoning_end([1, 2, 3, 4]) is False


def test_extract_content_ids(seedoss_tokenizer):
    """Test the extract_content_ids method."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    end_token_id = parser.end_token_id
    # Test with end token in the middle
    input_ids = [1, 2, end_token_id, 4, 5]
    content_ids = parser.extract_content_ids(input_ids)
    assert content_ids == [4, 5]
    # Test with end token at the end
    input_ids = [1, 2, 3, end_token_id]
    content_ids = parser.extract_content_ids(input_ids)
    assert content_ids == []
    # Test without end token
    input_ids = [1, 2, 3, 4]
    content_ids = parser.extract_content_ids(input_ids)
    assert content_ids == []


def test_streaming_delta_processing(seedoss_tokenizer):
    """Test streaming processing with small deltas."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser = parser_cls(seedoss_tokenizer)
    # Test streaming with incremental tokens
    deltas = ["Some ", "reasoning ", "content", "</seed:think>", "Final ", "answer"]
    reasoning, content = run_reasoning_extraction(parser, deltas, streaming=True)
    assert reasoning == "Some reasoning content"
    assert content == "Final answer"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/utils.py
tests/reasoning/utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Shared helpers for reasoning-parser tests.

Drives a :class:`ReasoningParser` in non-streaming or streaming mode and
collects the extracted (reasoning, content) pair for assertions.
"""

from vllm.entrypoints.openai.protocol import ChatCompletionRequest, DeltaMessage
from vllm.reasoning import ReasoningParser
from vllm.tokenizers.mistral import MistralTokenizer


class StreamingReasoningReconstructor:
    """Accumulates streamed DeltaMessages into full reasoning/content strings."""

    def __init__(self):
        # Both start as None so tests can distinguish "never emitted"
        # from "emitted empty string".
        self.reasoning = None
        self.other_content = None

    def append_delta(self, delta: DeltaMessage):
        """Fold one streamed delta into the accumulated strings."""
        # content and the reasoning content should not be present
        # at the same time
        assert delta.content is None or delta.reasoning is None, (
            "Both content and reasoning content are present in the delta message"
        )
        assert delta.reasoning == delta.reasoning_content, (
            "reasoning_content should be present for backwards compatibility"
        )
        if delta.content is not None:
            if self.other_content is None:
                self.other_content = delta.content
            else:
                self.other_content += delta.content
        else:
            if self.reasoning is None:
                self.reasoning = delta.reasoning
            else:
                self.reasoning += delta.reasoning


def run_reasoning_extraction(
    reasoning_parser: ReasoningParser,
    model_output: list[str],
    request: ChatCompletionRequest | None = None,
    streaming: bool = False,
) -> tuple[str | None, str | None]:
    """Return (reasoning, content) extracted from ``model_output`` chunks."""
    if streaming:
        reconstructor = run_reasoning_extraction_streaming(
            reasoning_parser,
            model_output,
            request,
        )
        return (
            reconstructor.reasoning,
            # Normalize empty accumulated content to None for assertions.
            reconstructor.other_content or None,
        )
    else:
        reasoning, content = run_reasoning_extraction_nonstreaming(
            reasoning_parser, model_output, request
        )
        return reasoning, content


def run_reasoning_extraction_mistral(
    reasoning_parser: ReasoningParser,
    model_output: list[int],
    request: ChatCompletionRequest | None = None,
    streaming: bool = False,
) -> tuple[str | None, str | None]:
    """Like run_reasoning_extraction, but drives the parser with token ids
    via a MistralTokenizer."""
    assert isinstance(reasoning_parser.model_tokenizer, MistralTokenizer), type(
        reasoning_parser.model_tokenizer
    )
    if streaming:
        reconstructor = run_reasoning_extraction_streaming_mistral(
            reasoning_parser,
            model_output,
            request,
        )
        return (
            reconstructor.reasoning,
            reconstructor.other_content or None,
        )
    else:
        # Non-streaming path works on token strings, so decode first.
        str_output = reasoning_parser.model_tokenizer.convert_ids_to_tokens(
            model_output
        )
        reasoning, content = run_reasoning_extraction_nonstreaming(
            reasoning_parser, str_output, request
        )
        return reasoning, content


def run_reasoning_extraction_nonstreaming(
    reasoning_parser: ReasoningParser,
    model_output: list[str],
    request: ChatCompletionRequest | None = None,
) -> tuple[str | None, str | None]:
    """Extract from the full, joined model output in a single call."""
    request = request or ChatCompletionRequest(messages=[], model="test-model")
    return reasoning_parser.extract_reasoning(
        model_output="".join(model_output), request=request
    )


def run_reasoning_extraction_streaming(
    reasoning_parser: ReasoningParser,
    model_deltas: list[str],
    request: ChatCompletionRequest | None = None,
) -> StreamingReasoningReconstructor:
    """Feed ``model_deltas`` one at a time and reconstruct the stream."""
    request = request or ChatCompletionRequest(messages=[], model="test-model")
    reconstructor = StreamingReasoningReconstructor()
    previous_text = ""
    previous_tokens: list[int] = []
    for delta in model_deltas:
        # Map the text delta back to token ids; tokens missing from the
        # vocab are silently dropped.
        token_delta = [
            reasoning_parser.vocab.get(token)
            for token in reasoning_parser.model_tokenizer.tokenize(delta)
            if token in reasoning_parser.vocab
        ]
        current_text = previous_text + delta
        current_tokens = previous_tokens + token_delta
        delta_message = reasoning_parser.extract_reasoning_streaming(
            previous_text,
            current_text,
            delta,
            previous_tokens,
            current_tokens,
            token_delta,
        )
        if delta_message is not None:
            reconstructor.append_delta(delta_message)
        previous_text = current_text
        previous_tokens = current_tokens
    return reconstructor


def run_reasoning_extraction_streaming_mistral(
    reasoning_parser: ReasoningParser,
    model_deltas: list[int],
    request: ChatCompletionRequest | None = None,
) -> StreamingReasoningReconstructor:
    """Streaming driver fed with one token id per step (Mistral tokenizer)."""
    assert isinstance(reasoning_parser.model_tokenizer, MistralTokenizer), type(
        reasoning_parser.model_tokenizer
    )
    request = request or ChatCompletionRequest(messages=[], model="test-model")
    reconstructor = StreamingReasoningReconstructor()
    previous_text = ""
    previous_tokens: list[int] = []
    for model_delta in model_deltas:
        token_delta = [model_delta]
        # Decode the single token id to its text form for the parser.
        delta = reasoning_parser.model_tokenizer.convert_ids_to_tokens([model_delta])[0]
        current_text = previous_text + delta
        current_tokens = previous_tokens + token_delta
        delta_message = reasoning_parser.extract_reasoning_streaming(
            previous_text,
            current_text,
            delta,
            previous_tokens,
            current_tokens,
            token_delta,
        )
        if delta_message is not None:
            reconstructor.append_delta(delta_message)
        previous_text = current_text
        previous_tokens = current_tokens
    return reconstructor
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_ernie45_reasoning_parser.py
tests/reasoning/test_ernie45_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the ERNIE 4.5 reasoning parser (``</think>`` delimited)."""

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "ernie45"

REASONING_MODEL_NAME = "baidu/ERNIE-4.5-21B-A3B-Thinking"


@pytest.fixture(scope="module")
def ernie45_tokenizer():
    # Module-scoped: load the tokenizer once for all cases.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


# With </think>, non-streaming
WITH_THINK = {
    "output": "abc</think>def",
    "reasoning": "abc",
    "content": "def",
}
# With </think>, streaming
WITH_THINK_STREAM = {
    "output": "abc</think>def",
    "reasoning": "abc",
    "content": "def",
}
# without </think>, all is reasoning
WITHOUT_THINK = {
    "output": "abc",
    "reasoning": "abc",
    "content": None,
}
# without </think>, all is reasoning
WITHOUT_THINK_STREAM = {
    "output": "abc",
    "reasoning": "abc",
    "content": None,
}
COMPLETE_REASONING = {
    "output": "abc</think>",
    "reasoning": "abc",
    "content": None,
}
MULTILINE_REASONING = {
    "output": "abc\nABC</think>def\nDEF",
    "reasoning": "abc\nABC",
    "content": "def\nDEF",
}

TEST_CASES = [
    pytest.param(
        False,
        WITH_THINK,
        id="with_think",
    ),
    pytest.param(
        True,
        WITH_THINK_STREAM,
        id="with_think_stream",
    ),
    pytest.param(
        False,
        WITHOUT_THINK,
        id="without_think",
    ),
    pytest.param(
        True,
        WITHOUT_THINK_STREAM,
        id="without_think_stream",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_stream",
    ),
    pytest.param(
        False,
        MULTILINE_REASONING,
        id="multiline_reasoning",
    ),
    pytest.param(
        True,
        MULTILINE_REASONING,
        id="multiline_reasoning_stream",
    ),
]


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    ernie45_tokenizer,
):
    """Round-trip ``param_dict['output']`` through the parser.

    The output is re-tokenized and each token decoded back to its string
    form so the streaming path receives realistic per-token deltas.
    """
    output = ernie45_tokenizer.tokenize(param_dict["output"])
    # Drop tokens that decode to an empty string so streaming deltas
    # are never empty.
    output_tokens: list[str] = []
    for token in output:
        one_token = ernie45_tokenizer.convert_tokens_to_string([token])
        if one_token:
            output_tokens.append(one_token)
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        ernie45_tokenizer
    )
    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )
    # Fixed: removed a stray debug print() that cluttered test output.
    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/__init__.py
tests/reasoning/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_deepseekv3_reasoning_parser.py
tests/reasoning/test_deepseekv3_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for DeepSeek-V3 parser dispatch and the identity reasoning parser."""

import pytest
from transformers import AutoTokenizer

from vllm.entrypoints.openai.protocol import ChatCompletionRequest, DeltaMessage
from vllm.reasoning.deepseek_r1_reasoning_parser import DeepSeekR1ReasoningParser
from vllm.reasoning.deepseek_v3_reasoning_parser import DeepSeekV3ReasoningParser
from vllm.reasoning.identity_reasoning_parser import IdentityReasoningParser

REASONING_MODEL_NAME = "deepseek-ai/DeepSeek-V3.1"


@pytest.fixture(scope="module")
def tokenizer():
    # One tokenizer load for the whole module.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)


@pytest.mark.parametrize(
    "thinking,expected_parser_type",
    [
        (True, DeepSeekR1ReasoningParser),
        (False, IdentityReasoningParser),
    ],
)
def test_parser_selection(tokenizer, thinking, expected_parser_type):
    # The V3 wrapper picks its inner parser from the `thinking` template kwarg.
    wrapper = DeepSeekV3ReasoningParser(
        tokenizer, chat_template_kwargs={"thinking": thinking}
    )
    assert isinstance(wrapper._parser, expected_parser_type)


def test_identity_reasoning_parser_basic(tokenizer):
    identity = IdentityReasoningParser(tokenizer)

    sample_text = "This is some output"
    sample_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sample_text))

    # The identity parser treats reasoning as always finished...
    assert identity.is_reasoning_end(sample_ids) is True
    assert identity.is_reasoning_end_streaming(sample_ids, sample_ids) is True

    # ...and passes every token through as content.
    assert identity.extract_content_ids(sample_ids) == sample_ids

    # Non-streaming extraction: no reasoning, full text as content.
    request = ChatCompletionRequest(model="test-model", messages=[], temperature=1.0)
    reasoning, content = identity.extract_reasoning(sample_text, request)
    assert reasoning is None
    assert content == sample_text

    # Streaming: a non-empty delta yields a DeltaMessage carrying it verbatim.
    first_delta = identity.extract_reasoning_streaming(
        previous_text="",
        current_text="Hello world",
        delta_text="Hello world",
        previous_token_ids=[],
        current_token_ids=sample_ids,
        delta_token_ids=sample_ids,
    )
    assert isinstance(first_delta, DeltaMessage)
    assert first_delta.content == "Hello world"

    # Streaming: an empty delta yields nothing.
    empty_delta = identity.extract_reasoning_streaming(
        previous_text="Hello world",
        current_text="Hello world",
        delta_text="",
        previous_token_ids=sample_ids,
        current_token_ids=sample_ids,
        delta_token_ids=[],
    )
    assert empty_delta is None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_hunyuan_reasoning_parser.py
tests/reasoning/test_hunyuan_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the Hunyuan-A13B reasoning parser.

Hunyuan delimits reasoning with ``<think>``/``</think>`` and the answer
with ``<answer>``/``</answer>``.
"""

import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.reasoning import ReasoningParser, ReasoningParserManager

parser_name = "hunyuan_a13b"
START_REASONING = "<think>\n"
START_RESPONSE = "\n</think>\n<answer>\n"
END_RESPONSE = "\n</answer>"

# Fixed: identifier typo THROUGHT -> THOUGHT (internal constant only).
NO_REASONING_QUICK_THOUGHT = {
    "output": f"{START_REASONING}{START_RESPONSE}This is the rest{END_RESPONSE}",  # noqa: E501
    "reasoning": None,
    "content": "This is the rest",
}

SIMPLE_REASONING = {
    "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}This is the rest{END_RESPONSE}",  # noqa: E501
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
COMPLETE_REASONING = {
    "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}",
    "reasoning": "This is a reasoning section",
    "content": None,
}
COMPLETE_REASONING_WITH_SYMBOL = {
    "output": f"{START_REASONING}This is a reasoning section!{START_RESPONSE}",
    "reasoning": "This is a reasoning section!",
    "content": None,
}
NO_REASONING = {
    "output": "This is content",
    "reasoning": None,
    "content": "This is content",
}
MULTIPLE_LINES = {
    "output": f"{START_REASONING}This\nThat{START_RESPONSE}This is the rest\nThat",
    "reasoning": "This\nThat",
    "content": "This is the rest\nThat",
}
REASONING_WITH_THINK = {
    "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}This is the rest",  # noqa: E501
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
COMPLETE_REASONING_WITH_THINK = {
    "output": f"{START_REASONING}This is a reasoning section{START_RESPONSE}",
    "reasoning": "This is a reasoning section",
    "content": None,
}
MULTIPLE_LINES_WITH_THINK = {
    "output": f"{START_REASONING}This\nThat{START_RESPONSE}This is the rest\nThat",
    "reasoning": "This\nThat",
    "content": "This is the rest\nThat",
}

TEST_CASES = [
    pytest.param(
        False,
        SIMPLE_REASONING,
        id="simple_reasoning",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING_WITH_SYMBOL,
        id="complete_reasoning_with_symbol",
    ),
    pytest.param(
        False,
        NO_REASONING,
        id="no_reasoning",
    ),
    pytest.param(False, NO_REASONING_QUICK_THOUGHT, id="no_reasoning_quick"),
    pytest.param(
        False,
        MULTIPLE_LINES,
        id="multiple_lines",
    ),
    pytest.param(
        False,
        REASONING_WITH_THINK,
        id="reasoning_with_think",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING_WITH_THINK,
        id="complete_reasoning_with_think",
    ),
    pytest.param(
        False,
        MULTIPLE_LINES_WITH_THINK,
        id="multiple_lines_with_think",
    ),
    pytest.param(
        True,
        SIMPLE_REASONING,
        id="simple_reasoning_streaming",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_streaming",
    ),
    pytest.param(
        True,
        NO_REASONING,
        id="no_reasoning_streaming",
    ),
    pytest.param(True, NO_REASONING_QUICK_THOUGHT, id="no_reasoning_quick_stream"),
    pytest.param(
        True,
        MULTIPLE_LINES,
        id="multiple_lines_streaming",
    ),
    pytest.param(
        True,
        REASONING_WITH_THINK,
        id="reasoning_with_think_streaming",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING_WITH_THINK,
        id="complete_reasoning_with_think_streaming",
    ),
    pytest.param(
        True,
        MULTIPLE_LINES_WITH_THINK,
        id="multiple_lines_with_think_streaming",
    ),
]

# Global tokenizer initialization to avoid repeated loading
tokenizer = AutoTokenizer.from_pretrained(
    "tencent/Hunyuan-A13B-Instruct", trust_remote_code=True
)


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
):
    """Feed the tokenized output through the parser and check extraction."""
    output = tokenizer.tokenize(param_dict["output"])
    # decode everything to tokens
    output_tokens: list[str] = [
        tokenizer.convert_tokens_to_string([token]) for token in output
    ]
    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        tokenizer
    )
    reasoning, content = run_reasoning_extraction(
        parser, output_tokens, streaming=streaming
    )
    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_mistral_reasoning_parser.py
tests/reasoning/test_mistral_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the Mistral reasoning parser (``[THINK]``/``[/THINK]``).

Cases marked INVALID contain an ``[/THINK]`` token without a matching
``[THINK]``; the parser is expected to treat the text as plain content.
"""

import pytest

from tests.reasoning.utils import run_reasoning_extraction_mistral
from vllm.reasoning import ReasoningParser, ReasoningParserManager
from vllm.tokenizers.mistral import MistralTokenizer

parser_name = "mistral"


@pytest.fixture(scope="module")
def mistral_tokenizer():
    mistral_tokenizer = MistralTokenizer.from_pretrained(
        "mistralai/Magistral-Small-2509"
    )
    return mistral_tokenizer


INVALID_SIMPLE_REASONING = {
    "output": "This is a reasoning section[/THINK]This is the rest",
    "reasoning": None,
    "content": "This is a reasoning sectionThis is the rest",
    "is_reasoning_end": False,
}
INVALID_COMPLETE_REASONING = {
    "output": "This is a reasoning section[/THINK]",
    "reasoning": None,
    "content": "This is a reasoning section",
    "is_reasoning_end": False,
}
NO_CONTENT = {
    "output": "[THINK]This is reasoning",
    "reasoning": "This is reasoning",
    "content": None,
    "is_reasoning_end": False,
}
NO_REASONING = {
    "output": "This is content",
    "reasoning": None,
    "content": "This is content",
    "is_reasoning_end": False,
}
NO_REASONING_STREAMING = {
    "output": "This is a reasoning section",
    "reasoning": None,
    "content": "This is a reasoning section",
    "is_reasoning_end": False,
}
INVALID_MULTIPLE_LINES = {
    "output": "This\nThat[/THINK]This is the rest\nThat",
    "reasoning": None,
    "content": "This\nThatThis is the rest\nThat",
    "is_reasoning_end": False,
}
INVALID_SHORTEST_REASONING_NO_STREAMING = {
    "output": "[/THINK]This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": False,
}
INVALID_SHORTEST_REASONING = {
    "output": "[/THINK]This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": False,
}
REASONING_WITH_THINK = {
    "output": "[THINK]This is a reasoning section[/THINK]This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
    "is_reasoning_end": True,
}
COMPLETE_REASONING_WITH_THINK = {
    "output": "[THINK]This is a reasoning section[/THINK]",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": True,
}
MULTIPLE_LINES_WITH_THINK = {
    "output": "[THINK]This\nThat[/THINK]This is the rest\nThat",
    "reasoning": "This\nThat",
    "content": "This is the rest\nThat",
    "is_reasoning_end": True,
}
INVALID_SHORTEST_REASONING_NO_STREAMING_WITH_THINK = {
    "output": "[/THINK]This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": False,
}
INVALID_SHORTEST_REASONING_WITH_THINK = {
    "output": "[/THINK]This is the rest",
    "reasoning": None,
    "content": "This is the rest",
    "is_reasoning_end": False,
}
THINK_NO_END = {
    "output": "[THINK]This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
    "is_reasoning_end": False,
}
EMPTY = {
    "output": "",
    "reasoning": None,
    "content": "",
    "is_reasoning_end": False,
}
EMPTY_STREAMING = {
    "output": "",
    "reasoning": None,
    "content": None,
    "is_reasoning_end": False,
}
NEW_LINE = {
    "output": "Before\n[THINK]This is a reasoning section[/THINK]\nThis is the rest",
    "reasoning": "This is a reasoning section",
    "content": "Before\n\nThis is the rest",
    "is_reasoning_end": True,
}
NEW_LINE_STREAMING = {
    "output": "Before\n[THINK]This is a reasoning section[/THINK]\nThis is the rest",
    "reasoning": "This is a reasoning section",
    "content": "Before\n\nThis is the rest",
    "is_reasoning_end": True,
}

TEST_CASES = [
    pytest.param(
        False,
        INVALID_SIMPLE_REASONING,
        id="invalid_simple_reasoning",
    ),
    pytest.param(
        True,
        INVALID_SIMPLE_REASONING,
        id="invalid_simple_reasoning_streaming",
    ),
    pytest.param(
        False,
        INVALID_COMPLETE_REASONING,
        id="invalid_complete_reasoning",
    ),
    pytest.param(
        True,
        INVALID_COMPLETE_REASONING,
        id="invalid_complete_reasoning_streaming",
    ),
    pytest.param(
        False,
        NO_CONTENT,
        id="no_content",
    ),
    pytest.param(
        False,
        NO_REASONING,
        id="no_reasoning",
    ),
    pytest.param(
        True,
        NO_REASONING_STREAMING,
        id="no_reasoning_token_streaming",
    ),
    pytest.param(
        False,
        INVALID_MULTIPLE_LINES,
        id="invalid_multiple_lines",
    ),
    pytest.param(
        True,
        INVALID_MULTIPLE_LINES,
        id="invalid_multiple_lines_streaming",
    ),
    # NOTE(review): the ids of the next two params look swapped relative to
    # their streaming flags (True -> "invalid_shortest",
    # False -> "invalid_shortest_streaming") — confirm intent.
    pytest.param(
        True,
        INVALID_SHORTEST_REASONING,
        id="invalid_shortest",
    ),
    pytest.param(
        False,
        INVALID_SHORTEST_REASONING_NO_STREAMING,
        id="invalid_shortest_streaming",
    ),
    pytest.param(
        False,
        REASONING_WITH_THINK,
        id="reasoning_with_think",
    ),
    pytest.param(
        True,
        REASONING_WITH_THINK,
        id="reasoning_with_think_streaming",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING_WITH_THINK,
        id="complete_reasoning_with_think",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING_WITH_THINK,
        id="complete_reasoning_with_think_streaming",
    ),
    pytest.param(
        False,
        MULTIPLE_LINES_WITH_THINK,
        id="multiple_lines_with_think",
    ),
    pytest.param(
        True,
        MULTIPLE_LINES_WITH_THINK,
        id="multiple_lines_with_think_streaming",
    ),
    pytest.param(
        False,
        INVALID_SHORTEST_REASONING_NO_STREAMING_WITH_THINK,
        id="invalid_shortest_with_think",
    ),
    pytest.param(
        True,
        INVALID_SHORTEST_REASONING_WITH_THINK,
        id="invalid_shortest_with_think_streaming",
    ),
    pytest.param(
        False,
        THINK_NO_END,
        id="think_no_end",
    ),
    pytest.param(
        True,
        THINK_NO_END,
        id="think_no_end_streaming",
    ),
    pytest.param(
        False,
        EMPTY,
        id="empty",
    ),
    pytest.param(
        True,
        EMPTY_STREAMING,
        id="empty_streaming",
    ),
    pytest.param(
        False,
        NEW_LINE,
        id="new_line",
    ),
    pytest.param(
        True,
        NEW_LINE_STREAMING,
        id="new_line_streaming",
    ),
]


@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_mistral_reasoning(
    streaming: bool,
    param_dict: dict,
    mistral_tokenizer: MistralTokenizer,
):
    """Build the exact token-id sequence (using the tokenizer's special
    BEGIN_THINK/END_THINK ids for the textual markers) and verify
    extraction, end-detection and content-id extraction."""
    output = param_dict["output"]
    index_think = output.find("[THINK]")
    len_think = len("[THINK]")
    index_end_think = output.find("[/THINK]")
    len_end_think = len("[/THINK]")

    # encode everything to tokens ids
    output_tokens = []
    if index_think != -1:
        # Text before [THINK], then the special BEGIN_THINK id.
        output_before_think = output[:index_think]
        output_tokens += mistral_tokenizer.tokenizer.encode(
            output_before_think, False, False
        )
        output_tokens += [mistral_tokenizer.instruct.BEGIN_THINK]
        if index_end_think != -1:
            # [THINK] ... [/THINK] ... : middle + END_THINK + tail.
            output_middle = output[index_think + len_think : index_end_think]
            output_after_think = output[index_end_think + len_end_think :]
            output_tokens += mistral_tokenizer.tokenizer.encode(
                output_middle, False, False
            )
            output_tokens += [mistral_tokenizer.instruct.END_THINK]
            output_tokens += mistral_tokenizer.tokenizer.encode(
                output_after_think, False, False
            )
        else:
            # Unterminated [THINK]: everything after it is reasoning.
            output_middle = output[index_think + len_think :]
            output_tokens += mistral_tokenizer.tokenizer.encode(
                output_middle, False, False
            )
    elif index_end_think != -1:
        # [/THINK] without [THINK]: encode around the lone END_THINK id.
        output_before_think = output[:index_end_think]
        output_after_think = output[index_end_think + len_end_think :]
        output_tokens += mistral_tokenizer.tokenizer.encode(
            output_before_think, False, False
        )
        output_tokens += [mistral_tokenizer.instruct.END_THINK]
        output_tokens += mistral_tokenizer.tokenizer.encode(
            output_after_think, False, False
        )
    else:
        # No markers at all: plain encode.
        output_tokens += mistral_tokenizer.tokenizer.encode(output, False, False)

    parser: ReasoningParser = ReasoningParserManager.get_reasoning_parser(parser_name)(
        mistral_tokenizer
    )

    reasoning, content = run_reasoning_extraction_mistral(
        parser, output_tokens, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]

    # Test is_reasoning_end
    is_reasoning_end = parser.is_reasoning_end(output_tokens)
    assert is_reasoning_end == param_dict["is_reasoning_end"]

    # Test extract_content
    if param_dict["content"] is not None:
        # Handle the case where there are tokens outputted before Thinking.
        # This should not occur if the model is well trained and prompted.
        if "[THINK]" in param_dict["output"] and not param_dict["output"].startswith(
            "[THINK]"
        ):
            before_content = param_dict["output"].split("[THINK]")[0]
            before_token_ids = mistral_tokenizer.tokenizer.encode(
                before_content, bos=False, eos=False
            )
            left_to_encode = param_dict["content"][len(before_content) :]
        # Normal situation.
        else:
            before_token_ids = []
            left_to_encode = param_dict["content"]
        content_tokens = parser.extract_content_ids(output_tokens)
        expected_token_ids = before_token_ids + mistral_tokenizer.tokenizer.encode(
            left_to_encode, bos=False, eos=False
        )
        assert content_tokens == expected_token_ids
    else:
        content = parser.extract_content_ids(output_tokens)
        assert content == []
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/reasoning/test_base_thinking_reasoning_parser.py
tests/reasoning/test_base_thinking_reasoning_parser.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for BaseThinkingReasoningParser (start/end "think" token parsing)."""
import pytest
from transformers import AutoTokenizer

from tests.reasoning.utils import run_reasoning_extraction
from vllm.entrypoints.openai.protocol import ChatCompletionRequest
from vllm.reasoning.basic_parsers import BaseThinkingReasoningParser


# Create a concrete test implementation of BaseThinkingReasoningParser
class TestThinkingReasoningParser(BaseThinkingReasoningParser):
    """Test implementation of BaseThinkingReasoningParser."""

    @property
    def start_token(self) -> str:
        return "<test:think>"

    @property
    def end_token(self) -> str:
        return "</test:think>"


class TestThinkingReasoningParserAlt(BaseThinkingReasoningParser):
    """Alternative test implementation with different tokens."""

    @property
    def start_token(self) -> str:
        return "<alt:start>"

    @property
    def end_token(self) -> str:
        return "<alt:end>"


# Use a test model
REASONING_MODEL_NAME = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"


@pytest.fixture(scope="module")
def test_tokenizer():
    # Real tokenizer extended with the custom marker tokens used above so
    # the parsers can resolve their start/end token ids.
    tokenizer = AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)
    # Add custom test tokens
    test_tokens = ["<test:think>", "</test:think>", "<alt:start>", "<alt:end>"]
    existing_tokens = set(tokenizer.get_vocab().keys())
    new_tokens = [token for token in test_tokens if token not in existing_tokens]
    if new_tokens:
        tokenizer.add_tokens(new_tokens)
    return tokenizer


class TestBaseThinkingReasoningParserInit:
    """
    Test initialization and basic properties of BaseThinkingReasoningParser.
    """

    def test_successful_initialization(self, test_tokenizer):
        """Test successful initialization with valid tokens."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        assert parser.start_token == "<test:think>"
        assert parser.end_token == "</test:think>"
        assert parser.start_token_id is not None
        assert parser.end_token_id is not None

    def test_initialization_with_missing_tokenizer(self):
        """Test that initialization fails without tokenizer."""
        with pytest.raises(ValueError, match="model tokenizer must be passed"):
            TestThinkingReasoningParser(None)

    def test_initialization_with_missing_tokens(self, test_tokenizer):
        """Test that initialization fails when tokens are not in vocabulary."""

        # Create a parser with tokens not in vocabulary
        class MissingTokenParser(BaseThinkingReasoningParser):
            @property
            def start_token(self) -> str:
                return "<missing:start>"

            @property
            def end_token(self) -> str:
                return "<missing:end>"

        with pytest.raises(
            RuntimeError, match="could not locate think start/end tokens"
        ):
            MissingTokenParser(test_tokenizer)

    def test_initialization_with_empty_tokens(self, test_tokenizer):
        """Test that initialization fails with empty token strings."""

        class EmptyTokenParser(BaseThinkingReasoningParser):
            @property
            def start_token(self) -> str:
                return ""

            @property
            def end_token(self) -> str:
                return ""

        with pytest.raises(
            ValueError, match="start_token and end_token must be defined"
        ):
            EmptyTokenParser(test_tokenizer)


class TestBaseThinkingReasoningParserMethods:
    """Test the methods of BaseThinkingReasoningParser."""

    def test_is_reasoning_end(self, test_tokenizer):
        """Test the is_reasoning_end method."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        end_token_id = parser.end_token_id
        start_token_id = parser.start_token_id
        # Test with end token present
        assert parser.is_reasoning_end([1, 2, end_token_id, 4]) is True
        # Test without end token
        assert parser.is_reasoning_end([1, 2, 3, 4]) is False
        # Test with empty list
        assert parser.is_reasoning_end([]) is False
        # Test with interleaved thinking: a re-opened (unclosed) think
        # section means reasoning has not ended.
        assert parser.is_reasoning_end([1, start_token_id, 2, end_token_id]) is True
        assert parser.is_reasoning_end([1, start_token_id, 2, 3]) is False
        assert (
            parser.is_reasoning_end(
                [1, start_token_id, 2, end_token_id, 2, 2, start_token_id]
            )
            is False
        )

    def test_is_reasoning_end_streaming(self, test_tokenizer):
        """Test the is_reasoning_end_streaming method."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        end_token_id = parser.end_token_id
        start_token_id = parser.start_token_id
        assert (
            parser.is_reasoning_end_streaming([1, 2, end_token_id], [end_token_id])
            is True
        )
        assert parser.is_reasoning_end_streaming([1, 2, 3, 4], [4]) is False
        assert parser.is_reasoning_end_streaming([], []) is False
        assert (
            parser.is_reasoning_end_streaming(
                [1, start_token_id, 2, end_token_id], [end_token_id]
            )
            is True
        )
        assert (
            parser.is_reasoning_end_streaming([1, start_token_id, 2, 3], [3]) is False
        )
        assert (
            parser.is_reasoning_end_streaming(
                [1, start_token_id, 2, end_token_id, 2, start_token_id, 2],
                [2],
            )
            is False
        )
        assert (
            parser.is_reasoning_end_streaming(
                [1, start_token_id, 2, end_token_id, 2, 2], [2]
            )
            is False
        )

    def test_extract_content_ids(self, test_tokenizer):
        """Test the extract_content_ids method."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        end_token_id = parser.end_token_id

        # Test with end token in the middle
        input_ids = [1, 2, end_token_id, 4, 5]
        content_ids = parser.extract_content_ids(input_ids)
        assert content_ids == [4, 5]

        # Test with end token at the end
        input_ids = [1, 2, 3, end_token_id]
        content_ids = parser.extract_content_ids(input_ids)
        assert content_ids == []

        # Test without end token
        input_ids = [1, 2, 3, 4]
        content_ids = parser.extract_content_ids(input_ids)
        assert content_ids == []

        # Test with end token as last element (should not extract)
        input_ids = [1, 2, 3, end_token_id]
        content_ids = parser.extract_content_ids(input_ids)
        assert content_ids == []


class TestBaseThinkingReasoningParserExtraction:
    """Test reasoning content extraction methods."""

    def test_extract_reasoning_with_both_tokens(self, test_tokenizer):
        """Test extraction when both start and end tokens are present."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        request = ChatCompletionRequest(messages=[], model="test-model")
        model_output = "<test:think>This is reasoning</test:think>This is content"
        reasoning, content = parser.extract_reasoning(model_output, request)
        assert reasoning == "This is reasoning"
        assert content == "This is content"

    def test_extract_reasoning_only_end_token(self, test_tokenizer):
        """Test extraction when only end token is present."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        request = ChatCompletionRequest(messages=[], model="test-model")
        model_output = "This is reasoning</test:think>This is content"
        reasoning, content = parser.extract_reasoning(model_output, request)
        assert reasoning == "This is reasoning"
        assert content == "This is content"

    def test_extract_reasoning_no_end_token(self, test_tokenizer):
        """Test extraction when no end token is present."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        request = ChatCompletionRequest(messages=[], model="test-model")
        model_output = "This is just content"
        # Without an end token the whole output counts as reasoning.
        reasoning, content = parser.extract_reasoning(model_output, request)
        assert reasoning == "This is just content"
        assert content is None

    def test_extract_reasoning_empty_output(self, test_tokenizer):
        """Test extraction with empty output."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        request = ChatCompletionRequest(messages=[], model="test-model")
        model_output = ""
        reasoning, content = parser.extract_reasoning(model_output, request)
        assert reasoning == ""
        assert content is None

    def test_extract_reasoning_only_tokens(self, test_tokenizer):
        """Test extraction with only tokens and no content."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        request = ChatCompletionRequest(messages=[], model="test-model")
        model_output = "<test:think></test:think>"
        reasoning, content = parser.extract_reasoning(model_output, request)
        assert reasoning == ""
        assert content is None


class TestBaseThinkingReasoningParserStreaming:
    """Test streaming functionality of BaseThinkingReasoningParser."""

    @pytest.mark.parametrize("streaming", [True, False])
    def test_simple_reasoning_extraction(self, test_tokenizer, streaming):
        """
        Test basic reasoning extraction in both streaming and
        non-streaming modes.
        """
        parser = TestThinkingReasoningParser(test_tokenizer)
        model_output = [
            "<test:think>",
            "Some ",
            "reasoning ",
            "content",
            "</test:think>",
            "Final ",
            "answer",
        ]
        reasoning, content = run_reasoning_extraction(
            parser, model_output, streaming=streaming
        )
        assert reasoning == "Some reasoning content"
        assert content == "Final answer"

    def test_streaming_with_incremental_deltas(self, test_tokenizer):
        """Test streaming processing with small incremental deltas."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        deltas = [
            "<test:think>",
            "Some ",
            "reasoning ",
            "content",
            "</test:think>",
            "Final ",
            "answer",
        ]
        reasoning, content = run_reasoning_extraction(parser, deltas, streaming=True)
        assert reasoning == "Some reasoning content"
        assert content == "Final answer"

    def test_streaming_with_start_token(self, test_tokenizer):
        """Test streaming with start token included."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        deltas = [
            "<test:think>",
            "Some ",
            "reasoning",
            "</test:think>",
            "Answer",
        ]
        reasoning, content = run_reasoning_extraction(parser, deltas, streaming=True)
        assert reasoning == "Some reasoning"
        assert content == "Answer"

    def test_streaming_no_end_token(self, test_tokenizer):
        """Test streaming when no end token is encountered."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        deltas = [
            "<test:think>",
            "Some ",
            "reasoning ",
            "without ",
            "end",
        ]
        reasoning, content = run_reasoning_extraction(parser, deltas, streaming=True)
        assert reasoning == "Some reasoning without end"
        assert content is None

    def test_streaming_only_end_token(self, test_tokenizer):
        """Test streaming when only end token appears."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        deltas = [
            "<test:think>",
            "Reasoning ",
            "content",
            "</test:think>",
            "Final",
        ]
        reasoning, content = run_reasoning_extraction(parser, deltas, streaming=True)
        assert reasoning == "Reasoning content"
        assert content == "Final"


class TestBaseThinkingReasoningParserMultipleImplementations:
    """
    Test that multiple implementations of BaseThinkingReasoningParser
    work correctly.
    """

    def test_different_token_implementations(self, test_tokenizer):
        """
        Test that different implementations with different tokens
        work independently.
        """
        parser1 = TestThinkingReasoningParser(test_tokenizer)
        parser2 = TestThinkingReasoningParserAlt(test_tokenizer)

        # Test parser1
        model_output1 = "Reasoning1</test:think>Content1"
        reasoning1, content1 = run_reasoning_extraction(parser1, [model_output1])
        assert reasoning1 == "Reasoning1"
        assert content1 == "Content1"

        # Test parser2
        model_output2 = "Reasoning2<alt:end>Content2"
        reasoning2, content2 = run_reasoning_extraction(parser2, [model_output2])
        assert reasoning2 == "Reasoning2"
        assert content2 == "Content2"

        # Verify tokens are different
        assert parser1.start_token != parser2.start_token
        assert parser1.end_token != parser2.end_token
        assert parser1.start_token_id != parser2.start_token_id
        assert parser1.end_token_id != parser2.end_token_id


class TestBaseThinkingReasoningParserEdgeCases:
    """Test edge cases and error conditions."""

    def test_multiple_end_tokens(self, test_tokenizer):
        """Test behavior with multiple end tokens."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        model_output = "First</test:think>Middle</test:think>Last"
        reasoning, content = run_reasoning_extraction(parser, [model_output])
        # Should stop at first end token
        assert reasoning == "First"
        assert content == "Middle</test:think>Last"

    def test_nested_tokens(self, test_tokenizer):
        """Test behavior with nested-like token patterns."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        model_output = "<test:think>Outer<test:think>Inner</test:think>Content"
        reasoning, content = run_reasoning_extraction(parser, [model_output])
        # Should process normally, start from first start token
        assert reasoning == "Outer<test:think>Inner"
        assert content == "Content"

    def test_malformed_tokens(self, test_tokenizer):
        """Test behavior with malformed token-like strings."""
        parser = TestThinkingReasoningParser(test_tokenizer)
        model_output = "<test:thinking>Not a real token</test:thinking>Content"
        reasoning, content = run_reasoning_extraction(parser, [model_output])
        # Should treat as regular content since tokens don't match exactly
        assert reasoning == ("<test:thinking>Not a real token</test:thinking>Content")
        assert content is None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_stat_logger/setup.py
tests/plugins/vllm_add_dummy_stat_logger/setup.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from setuptools import setup setup( name="dummy_stat_logger", version="0.1", packages=["dummy_stat_logger"], entry_points={ "vllm.stat_logger_plugins": [ "dummy_stat_logger = dummy_stat_logger.dummy_stat_logger:DummyStatLogger" # noqa ] }, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_stat_logger/dummy_stat_logger/dummy_stat_logger.py
tests/plugins/vllm_add_dummy_stat_logger/dummy_stat_logger/dummy_stat_logger.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm.v1.metrics.loggers import StatLoggerBase class DummyStatLogger(StatLoggerBase): """ A dummy stat logger for testing purposes. Implements the minimal interface expected by StatLoggerManager. """ def __init__(self, vllm_config, engine_idx=0): self.vllm_config = vllm_config self.engine_idx = engine_idx self.recorded = [] self.logged = False self.engine_initialized = False def record(self, scheduler_stats, iteration_stats, mm_cache_stats, engine_idx): self.recorded.append( (scheduler_stats, iteration_stats, mm_cache_stats, engine_idx) ) def log(self): self.logged = True def log_engine_initialized(self): self.engine_initialized = True
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_model/setup.py
tests/plugins/vllm_add_dummy_model/setup.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from setuptools import setup setup( name="vllm_add_dummy_model", version="0.1", packages=["vllm_add_dummy_model"], entry_points={ "vllm.general_plugins": ["register_dummy_model = vllm_add_dummy_model:register"] }, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_opt.py
tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_opt.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch

from vllm.model_executor.models.opt import OPTForCausalLM


class MyOPTForCausalLM(OPTForCausalLM):
    """OPT test variant whose logits always favor token id 0."""

    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor | None:
        # This dummy model always predicts the first token: zero out the
        # logits computed by the base model, then bump vocab index 0.
        logits = super().compute_logits(hidden_states)
        if logits is None:
            return None
        logits.zero_()
        logits[:, 0] += 1.0
        return logits
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_gemma_embedding.py
tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_gemma_embedding.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Iterable import torch import torch.nn as nn from vllm.config import VllmConfig from vllm.model_executor.layers.pooler import DispatchPooler, Pooler from vllm.model_executor.models.gemma2 import Gemma2Model from vllm.model_executor.models.utils import WeightsMapper, maybe_prefix from vllm.sequence import IntermediateTensors class MyGemma2Embedding(nn.Module): is_pooling_model = True hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() self.model = Gemma2Model( vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model") ) pooler_config = vllm_config.model_config.pooler_config assert pooler_config is not None self.pooler = DispatchPooler( { "token_embed": Pooler.for_token_embed(pooler_config), "embed": Pooler.for_embed(pooler_config), } ) self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors ) def forward( self, input_ids: torch.Tensor, positions: torch.Tensor, intermediate_tensors: IntermediateTensors | None = None, inputs_embeds: torch.Tensor | None = None, ) -> torch.Tensor | IntermediateTensors: hidden_states = self.model( input_ids, positions, intermediate_tensors=intermediate_tensors, inputs_embeds=inputs_embeds, ) if isinstance(hidden_states, IntermediateTensors): return hidden_states # Return all-zero embeddings return torch.zeros_like(hidden_states) def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]): weights = self.hf_to_vllm_mapper.apply(weights) weights = ( (name, data) for name, data in weights if not name.startswith("lm_head.") ) return self.model.load_weights(weights)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/__init__.py
tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import ModelRegistry def register(): # Test directly passing the model from .my_opt import MyOPTForCausalLM if "MyOPTForCausalLM" not in ModelRegistry.get_supported_archs(): ModelRegistry.register_model("MyOPTForCausalLM", MyOPTForCausalLM) # Test passing lazy model if "MyGemma2Embedding" not in ModelRegistry.get_supported_archs(): ModelRegistry.register_model( "MyGemma2Embedding", "vllm_add_dummy_model.my_gemma_embedding:MyGemma2Embedding", ) if "MyLlava" not in ModelRegistry.get_supported_archs(): ModelRegistry.register_model("MyLlava", "vllm_add_dummy_model.my_llava:MyLlava")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py
tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch from vllm.model_executor.models.llava import ( LlavaDummyInputsBuilder, LlavaForConditionalGeneration, LlavaMultiModalProcessor, LlavaProcessingInfo, ) from vllm.multimodal import MULTIMODAL_REGISTRY @MULTIMODAL_REGISTRY.register_processor( LlavaMultiModalProcessor, info=LlavaProcessingInfo, dummy_inputs=LlavaDummyInputsBuilder, ) class MyLlava(LlavaForConditionalGeneration): def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor | None: # this dummy model always predicts the first token logits = super().compute_logits(hidden_states) if logits is not None: logits.zero_() logits[:, 0] += 1.0 return logits
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_platform/setup.py
tests/plugins/vllm_add_dummy_platform/setup.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from setuptools import setup setup( name="vllm_add_dummy_platform", version="0.1", packages=["vllm_add_dummy_platform"], entry_points={ "vllm.platform_plugins": [ "dummy_platform_plugin = vllm_add_dummy_platform:dummy_platform_plugin" # noqa ], "vllm.general_plugins": [ "dummy_custom_ops = vllm_add_dummy_platform:register_ops" ], }, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_attention_backend.py
tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_attention_backend.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm.attention.backends.placeholder_attn import PlaceholderAttentionBackend class DummyAttentionBackend(PlaceholderAttentionBackend): @staticmethod def get_name() -> str: return "Dummy_Backend"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_platform.py
tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_platform.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from typing import TYPE_CHECKING from vllm.platforms.interface import Platform, PlatformEnum if TYPE_CHECKING: from vllm.config import VllmConfig else: VllmConfig = None class DummyPlatform(Platform): _enum = PlatformEnum.OOT device_name = "DummyDevice" device_type: str = "privateuseone" dispatch_key: str = "PrivateUse1" @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: vllm_config.compilation_config.custom_ops = ["all"] def get_attn_backend_cls( self, backend_name, head_size, dtype, kv_cache_dtype, block_size, use_mla, has_sink, use_sparse, use_mm_prefix, ): return "vllm_add_dummy_platform.dummy_attention_backend.DummyAttentionBackend" # noqa E501
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_custom_ops.py
tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_custom_ops.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding # Register CustomRotaryEmbedding to CustomOP. @RotaryEmbedding.register_oot class DummyRotaryEmbedding(RotaryEmbedding): """Original rotary positional embedding.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.addition_config = True def forward_oot(self, *args, **kwargs) -> tuple[torch.Tensor, torch.Tensor]: return super().forward_oot(*args, **kwargs)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/__init__.py
tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/__init__.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project def dummy_platform_plugin() -> str | None: return "vllm_add_dummy_platform.dummy_platform.DummyPlatform" def register_ops(): import vllm_add_dummy_platform.dummy_custom_ops # noqa
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/lora_resolvers/test_filesystem_resolver.py
tests/plugins/lora_resolvers/test_filesystem_resolver.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import shutil import pytest from huggingface_hub import snapshot_download from vllm.plugins.lora_resolvers.filesystem_resolver import FilesystemResolver MODEL_NAME = "Qwen/Qwen3-0.6B" LORA_NAME = "charent/self_cognition_Alice" PA_NAME = "swapnilbp/llama_tweet_ptune" @pytest.fixture(scope="module") def adapter_cache(request, tmpdir_factory): # Create dir that mimics the structure of the adapter cache adapter_cache = tmpdir_factory.mktemp(request.module.__name__) / "adapter_cache" return adapter_cache @pytest.fixture(scope="module") def qwen3_lora_files(): return snapshot_download(repo_id=LORA_NAME) @pytest.fixture(scope="module") def pa_files(): return snapshot_download(repo_id=PA_NAME) @pytest.mark.asyncio async def test_filesystem_resolver(adapter_cache, qwen3_lora_files): model_files = adapter_cache / LORA_NAME shutil.copytree(qwen3_lora_files, model_files) fs_resolver = FilesystemResolver(adapter_cache) assert fs_resolver is not None lora_request = await fs_resolver.resolve_lora(MODEL_NAME, LORA_NAME) assert lora_request is not None assert lora_request.lora_name == LORA_NAME assert lora_request.lora_path == os.path.join(adapter_cache, LORA_NAME) @pytest.mark.asyncio async def test_missing_adapter(adapter_cache): fs_resolver = FilesystemResolver(adapter_cache) assert fs_resolver is not None missing_lora_request = await fs_resolver.resolve_lora(MODEL_NAME, "foobar") assert missing_lora_request is None @pytest.mark.asyncio async def test_nonlora_adapter(adapter_cache, pa_files): model_files = adapter_cache / PA_NAME shutil.copytree(pa_files, model_files) fs_resolver = FilesystemResolver(adapter_cache) assert fs_resolver is not None pa_request = await fs_resolver.resolve_lora(MODEL_NAME, PA_NAME) assert pa_request is None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/lora_resolvers/__init__.py
tests/plugins/lora_resolvers/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/prithvi_io_processor_plugin/setup.py
tests/plugins/prithvi_io_processor_plugin/setup.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from setuptools import setup setup( name="prithvi_io_processor_plugin", version="0.1", packages=["prithvi_io_processor"], entry_points={ "vllm.io_processor_plugins": [ "prithvi_to_tiff = prithvi_io_processor:register_prithvi", # noqa: E501 ] }, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/prithvi_io_processor_plugin/prithvi_io_processor/__init__.py
tests/plugins/prithvi_io_processor_plugin/prithvi_io_processor/__init__.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project


def register_prithvi():
    """IO-processor plugin entry point.

    Returns the fully-qualified class path of the Prithvi multimodal data
    processor so vLLM can import it lazily.
    """
    class_path = (
        "prithvi_io_processor.prithvi_processor.PrithviMultimodalDataProcessor"
    )
    return class_path  # noqa: E501
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/prithvi_io_processor_plugin/prithvi_io_processor/types.py
tests/plugins/prithvi_io_processor_plugin/prithvi_io_processor/types.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from typing import Any, Literal, TypedDict import albumentations from pydantic import BaseModel class DataModuleConfig(TypedDict): bands: list[str] batch_size: int constant_scale: float data_root: str drop_last: bool no_data_replace: float no_label_replace: int num_workers: int test_transform: list[albumentations.core.transforms_interface.BasicTransform] class ImagePrompt(BaseModel): data_format: Literal["b64_json", "bytes", "url", "path"] """ This is the data type for the input image """ image_format: str """ This is the image format (e.g., jpeg, png, etc.) """ out_data_format: Literal["b64_json", "url"] data: Any """ Input image data """ MultiModalPromptType = ImagePrompt class ImageRequestOutput(BaseModel): """ The output data of an image request to vLLM. Args: type (str): The data content type [path, object] format (str): The image format (e.g., jpeg, png, etc.) data (Any): The resulting data. """ type: Literal["path", "b64_json"] format: str data: str request_id: str | None = None
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/plugins/prithvi_io_processor_plugin/prithvi_io_processor/prithvi_processor.py
tests/plugins/prithvi_io_processor_plugin/prithvi_io_processor/prithvi_processor.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Prithvi IO processor: GeoTIFF in -> windowed model prompts -> GeoTIFF out."""
import base64
import datetime
import os
import tempfile
import urllib.request
from collections.abc import Sequence
from typing import Any

import albumentations
import numpy as np
import rasterio
import regex as re
import torch
from einops import rearrange
from terratorch.datamodules import Sen1Floods11NonGeoDataModule

from vllm.config import VllmConfig
from vllm.entrypoints.pooling.pooling.protocol import (
    IOProcessorRequest,
    IOProcessorResponse,
)
from vllm.inputs.data import PromptType
from vllm.logger import init_logger
from vllm.outputs import PoolingRequestOutput
from vllm.plugins.io_processors.interface import (
    IOProcessor,
    IOProcessorInput,
    IOProcessorOutput,
)

from .types import DataModuleConfig, ImagePrompt, ImageRequestOutput

logger = init_logger(__name__)

# Sentinel for missing pixels in the raw rasters and its replacement value.
NO_DATA = -9999
NO_DATA_FLOAT = 0.0001
OFFSET = 0
PERCENTILE = 99

# Default band indices kept from the input rasters (6 bands).
DEFAULT_INPUT_INDICES = [0, 1, 2, 3, 4, 5]

datamodule_config: DataModuleConfig = {
    "bands": ["BLUE", "GREEN", "RED", "NIR_NARROW", "SWIR_1", "SWIR_2"],
    "batch_size": 16,
    "constant_scale": 0.0001,
    "data_root": "/dccstor/geofm-finetuning/datasets/sen1floods11",
    "drop_last": True,
    "no_data_replace": 0.0,
    "no_label_replace": -1,
    "num_workers": 8,
    "test_transform": [
        albumentations.Resize(
            always_apply=False, height=448, interpolation=1, p=1, width=448
        ),
        albumentations.pytorch.ToTensorV2(
            transpose_mask=False, always_apply=True, p=1.0
        ),
    ],
}


def save_geotiff(image: torch.Tensor, meta: dict, out_format: str) -> str | bytes:
    """Save multi-band image in Geotiff file.

    Args:
        image: np.ndarray with shape (bands, height, width)
        meta: dict with meta info.
        out_format: "path" to write ./prediction.tiff and return its path,
            or "b64_json" to return the base64-encoded TIFF bytes.

    Raises:
        ValueError: if *out_format* is not one of the supported formats.
    """
    if out_format == "path":
        # create temp file
        file_path = os.path.join(os.getcwd(), "prediction.tiff")
        with rasterio.open(file_path, "w", **meta) as dest:
            for i in range(image.shape[0]):
                dest.write(image[i, :, :], i + 1)
        return file_path
    elif out_format == "b64_json":
        with tempfile.NamedTemporaryFile() as tmpfile:
            # rasterio writes through its own handle to tmpfile.name; this
            # handle is still at offset 0, so read() returns the new bytes.
            with rasterio.open(tmpfile.name, "w", **meta) as dest:
                for i in range(image.shape[0]):
                    dest.write(image[i, :, :], i + 1)
            file_data = tmpfile.read()
            return base64.b64encode(file_data)
    else:
        raise ValueError("Unknown output format")


def _convert_np_uint8(float_image: torch.Tensor):
    # Scale [0, 1] float tensor to uint8 [0, 255].
    image = float_image.numpy() * 255.0
    image = image.astype(dtype=np.uint8)
    return image


def read_geotiff(
    file_path: str | None = None,
    path_type: str | None = None,
    file_data: bytes | None = None,
) -> tuple[torch.Tensor, dict, tuple[float, float] | None]:
    """Read all bands from *file_path* and return image + meta info.

    Args:
        file_path: path / URL / base64 payload, interpreted per *path_type*.
        path_type: one of "url", "path", "b64_json"; ignored when
            *file_data* is given.
        file_data: raw GeoTIFF bytes (takes precedence over *file_path*).

    Returns:
        np.ndarray with shape (bands, height, width), the meta info dict,
        and the (lng, lat) coords or None if unavailable.
    """
    if all([x is None for x in [file_path, path_type, file_data]]):
        raise Exception("All input fields to read_geotiff are None")

    write_to_file: bytes | None = None
    path: str | None = None

    if file_data is not None:
        write_to_file = file_data
    elif file_path is not None and path_type == "url":
        resp = urllib.request.urlopen(file_path)
        write_to_file = resp.read()
    elif file_path is not None and path_type == "path":
        path = file_path
    elif file_path is not None and path_type == "b64_json":
        image_data = base64.b64decode(file_path)
        write_to_file = image_data
    else:
        raise Exception("Wrong combination of parameters to read_geotiff")

    with tempfile.NamedTemporaryFile() as tmpfile:
        path_to_use = None
        if write_to_file:
            # NOTE(review): no flush() before rasterio re-opens tmpfile.name
            # below — buffered bytes may not be on disk yet; confirm and
            # consider adding tmpfile.flush().
            tmpfile.write(write_to_file)
            path_to_use = tmpfile.name
        elif path:
            path_to_use = path
        with rasterio.open(path_to_use) as src:
            img = src.read()
            meta = src.meta
            try:
                coords = src.lnglat()
            except Exception:
                # Cannot read coords
                coords = None

        return img, meta, coords


def load_image(
    data: list[str],
    path_type: str,
    mean: list[float] | None = None,
    std: list[float] | None = None,
    indices: list[int] | None = None,
):
    """Build an input example by loading the images referenced in *data*.

    Args:
        data: list of file paths / URLs / base64 payloads (see *path_type*).
        path_type: how each entry of *data* is interpreted by read_geotiff.
        mean: per-band mean values used for normalization (optional).
        std: per-band std values used for normalization (optional).
        indices: band indices to keep from each raster (optional).

    Returns:
        np.array containing the created example, the extracted temporal
        coords, the location coords, and a list of meta info for each image.
    """
    imgs = []
    metas = []
    temporal_coords = []
    location_coords = []

    for file in data:
        img, meta, coords = read_geotiff(file_path=file, path_type=path_type)

        # Rescaling (don't normalize on nodata)
        img = np.moveaxis(img, 0, -1)  # channels last for rescaling
        if indices is not None:
            img = img[..., indices]
        if mean is not None and std is not None:
            img = np.where(img == NO_DATA, NO_DATA_FLOAT, (img - mean) / std)

        imgs.append(img)
        metas.append(meta)
        if coords is not None:
            location_coords.append(coords)

        # Best-effort: extract an acquisition timestamp (YYYYMMDDTHHMMSS or
        # YYYYDDDTHHMMSS) from the file name; failures are only logged.
        try:
            match = re.search(r"(\d{7,8}T\d{6})", file)
            if match:
                year = int(match.group(1)[:4])
                julian_day = match.group(1).split("T")[0][4:]
                if len(julian_day) == 3:
                    julian_day = int(julian_day)
                else:
                    julian_day = (
                        datetime.datetime.strptime(julian_day, "%m%d")
                        .timetuple()
                        .tm_yday
                    )
                temporal_coords.append([year, julian_day])
        except Exception:
            logger.exception("Could not extract timestamp for %s", file)

    imgs = np.stack(imgs, axis=0)  # num_frames, H, W, C
    imgs = np.moveaxis(imgs, -1, 0).astype("float32")  # C, num_frames, H, W
    imgs = np.expand_dims(imgs, axis=0)  # add batch di

    return imgs, temporal_coords, location_coords, metas


class PrithviMultimodalDataProcessor(IOProcessor):
    """Converts GeoTIFF requests into model prompts and predictions back to
    GeoTIFF output.

    NOTE(review): per-request state (meta_data, original_h/w, h1/w1) is
    stored on the instance, so concurrent requests would clobber each
    other — confirm single-request usage.
    """

    # Default band indices; overwritten with DEFAULT_INPUT_INDICES in
    # __init__ (same value).
    indices = [0, 1, 2, 3, 4, 5]

    def __init__(self, vllm_config: VllmConfig):
        super().__init__(vllm_config)

        self.datamodule = Sen1Floods11NonGeoDataModule(
            data_root=datamodule_config["data_root"],
            batch_size=datamodule_config["batch_size"],
            num_workers=datamodule_config["num_workers"],
            bands=datamodule_config["bands"],
            drop_last=datamodule_config["drop_last"],
            test_transform=datamodule_config["test_transform"],
        )
        # Tiling parameters: input is split into img_size x img_size windows.
        self.img_size = 512
        self.h1 = 1
        self.w1 = 1
        self.original_h = 512
        self.original_w = 512
        self.batch_size = 1
        self.meta_data = None
        # Maps request_id -> per-request options (currently the out format).
        self.requests_cache: dict[str, dict[str, Any]] = {}
        self.indices = DEFAULT_INPUT_INDICES

    def parse_request(self, request: Any) -> IOProcessorInput:
        # Accept either a raw dict or an IOProcessorRequest carrying a dict.
        if type(request) is dict:
            image_prompt = ImagePrompt(**request)
            return image_prompt
        if isinstance(request, IOProcessorRequest):
            if not hasattr(request, "data"):
                raise ValueError("missing 'data' field in OpenAIBaseModel Request")
            request_data = request.data
            if type(request_data) is dict:
                return ImagePrompt(**request_data)
            else:
                raise ValueError("Unable to parse the request data")
        raise ValueError("Unable to parse request")

    def output_to_response(
        self, plugin_output: IOProcessorOutput
    ) -> IOProcessorResponse:
        # Wrap the plugin output unchanged in the protocol response type.
        return IOProcessorResponse(
            request_id=plugin_output.request_id,
            data=plugin_output,
        )

    def pre_process(
        self,
        prompt: IOProcessorInput,
        request_id: str | None = None,
        **kwargs,
    ) -> PromptType | Sequence[PromptType]:
        """Load the request image, tile it into windows, and build prompts."""
        image_data = dict(prompt)

        if request_id:
            # Remember the requested output format for post_process.
            self.requests_cache[request_id] = {
                "out_format": image_data["out_data_format"],
            }

        input_data, temporal_coords, location_coords, meta_data = load_image(
            data=[image_data["data"]],
            indices=self.indices,
            path_type=image_data["data_format"],
        )

        self.meta_data = meta_data[0]
        if input_data.mean() > 1:
            input_data = input_data / 10000  # Convert to range 0-1

        # Reflect-pad H and W up to a multiple of img_size so the image
        # tiles exactly into img_size x img_size windows.
        self.original_h, self.original_w = input_data.shape[-2:]
        pad_h = (self.img_size - (self.original_h % self.img_size)) % self.img_size
        pad_w = (self.img_size - (self.original_w % self.img_size)) % self.img_size
        input_data = np.pad(
            input_data,
            ((0, 0), (0, 0), (0, 0), (0, pad_h), (0, pad_w)),
            mode="reflect",
        )

        batch = torch.tensor(input_data)
        windows = batch.unfold(3, self.img_size, self.img_size).unfold(
            4, self.img_size, self.img_size
        )
        # Remember the tiling grid so post_process can stitch patches back.
        self.h1, self.w1 = windows.shape[3:5]
        windows = rearrange(
            windows,
            "b c t h1 w1 h w -> (b h1 w1) c t h w",
            h=self.img_size,
            w=self.img_size,
        )

        # Split into batches if number of windows > batch_size
        num_batches = (
            windows.shape[0] // self.batch_size
            if windows.shape[0] > self.batch_size
            else 1
        )
        windows = torch.tensor_split(windows, num_batches, dim=0)

        if temporal_coords:
            temporal_coords = torch.tensor(temporal_coords).unsqueeze(0)
        else:
            temporal_coords = None
        if location_coords:
            location_coords = torch.tensor(location_coords[0]).unsqueeze(0)
        else:
            location_coords = None

        # NOTE(review): if no coords were extracted, location_coords is None
        # and .to() below would raise — confirm inputs always carry coords.
        prompts = []
        for window in windows:
            # Apply standardization
            window = self.datamodule.test_transform(
                image=window.squeeze().numpy().transpose(1, 2, 0)
            )
            window = self.datamodule.aug(window)["image"]
            prompts.append(
                {
                    "prompt_token_ids": [1],
                    "multi_modal_data": {
                        "pixel_values": window.to(torch.float16)[0],
                        "location_coords": location_coords.to(torch.float16),
                    },
                }
            )

        return prompts

    def post_process(
        self,
        model_output: Sequence[PoolingRequestOutput],
        request_id: str | None = None,
        **kwargs,
    ) -> IOProcessorOutput:
        """Stitch per-window predictions back into one GeoTIFF output."""
        pred_imgs_list = []

        if request_id and (request_id in self.requests_cache):
            out_format = self.requests_cache[request_id]["out_format"]
        else:
            out_format = "b64_json"

        for output in model_output:
            # Class prediction per pixel, resized back to the window size.
            y_hat = output.outputs.data.argmax(dim=0)
            pred = torch.nn.functional.interpolate(
                y_hat[None, None, ...].float(),
                size=self.img_size,
                mode="nearest",
            )
            pred_imgs_list.append(pred)

        pred_imgs: torch.Tensor = torch.concat(pred_imgs_list, dim=0)

        # Build images from patches
        pred_imgs = rearrange(
            pred_imgs,
            "(b h1 w1) c h w -> b c (h1 h) (w1 w)",
            h=self.img_size,
            w=self.img_size,
            b=1,
            c=1,
            h1=self.h1,
            w1=self.w1,
        )

        # Cut padded area back to original size
        pred_imgs = pred_imgs[..., : self.original_h, : self.original_w]

        # Squeeze (batch size 1)
        pred_imgs = pred_imgs[0]

        if not self.meta_data:
            raise ValueError("No metadata available for the current task")

        self.meta_data.update(count=1, dtype="uint8", compress="lzw", nodata=0)

        # NOTE(review): for b64_json, save_geotiff returns bytes while
        # ImageRequestOutput.data is typed str — confirm pydantic coercion.
        out_data = save_geotiff(_convert_np_uint8(pred_imgs), self.meta_data, out_format)

        return ImageRequestOutput(
            type=out_format, format="tiff", data=out_data, request_id=request_id
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/basic_correctness/test_cumem.py
tests/basic_correctness/test_cumem.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import pytest import torch from vllm import LLM, AsyncEngineArgs, AsyncLLMEngine, SamplingParams from vllm.device_allocator.cumem import CuMemAllocator from vllm.platforms import current_platform from vllm.utils.mem_constants import GiB_bytes from ..utils import create_new_process_for_each_test, requires_fp8 @create_new_process_for_each_test("fork" if not current_platform.is_rocm() else "spawn") def test_python_error(): """ Test if Python error occurs when there's low-level error happening from the C++ side. """ allocator = CuMemAllocator.get_instance() total_bytes = torch.cuda.mem_get_info()[1] alloc_bytes = int(total_bytes * 0.7) tensors = [] with allocator.use_memory_pool(): # allocate 70% of the total memory x = torch.empty(alloc_bytes, dtype=torch.uint8, device="cuda") tensors.append(x) # release the memory allocator.sleep() # allocate more memory than the total memory y = torch.empty(alloc_bytes, dtype=torch.uint8, device="cuda") tensors.append(y) with pytest.raises(RuntimeError): # when the allocator is woken up, it should raise an error # because we don't have enough memory allocator.wake_up() @create_new_process_for_each_test("fork" if not current_platform.is_rocm() else "spawn") def test_basic_cumem(): # some tensors from default memory pool shape = (1024, 1024) x = torch.empty(shape, device="cuda") x.zero_() # some tensors from custom memory pool allocator = CuMemAllocator.get_instance() with allocator.use_memory_pool(): # custom memory pool y = torch.empty(shape, device="cuda") y.zero_() y += 1 z = torch.empty(shape, device="cuda") z.zero_() z += 2 # they can be used together output = x + y + z assert torch.allclose(output, torch.ones_like(output) * 3) free_bytes = torch.cuda.mem_get_info()[0] allocator.sleep() free_bytes_after_sleep = torch.cuda.mem_get_info()[0] assert free_bytes_after_sleep > free_bytes allocator.wake_up() # they 
can be used together output = x + y + z assert torch.allclose(output, torch.ones_like(output) * 3) @create_new_process_for_each_test("fork" if not current_platform.is_rocm() else "spawn") def test_cumem_with_cudagraph(): allocator = CuMemAllocator.get_instance() with allocator.use_memory_pool(): weight = torch.eye(1024, device="cuda") with allocator.use_memory_pool(tag="discard"): cache = torch.empty(1024, 1024, device="cuda") def model(x): out = x @ weight cache[: out.size(0)].copy_(out) return out + 1 x = torch.empty(128, 1024, device="cuda") # warmup model(x) # capture cudagraph model_graph = torch.cuda.CUDAGraph() with torch.cuda.graph(model_graph): y = model(x) free_bytes = torch.cuda.mem_get_info()[0] allocator.sleep() free_bytes_after_sleep = torch.cuda.mem_get_info()[0] assert free_bytes_after_sleep > free_bytes allocator.wake_up() # after waking up, the content in the weight tensor # should be restored, but the content in the cache tensor # should be discarded # this operation is also compatible with cudagraph x.random_() model_graph.replay() # cache content is as expected assert torch.allclose(x, cache[: x.size(0)]) # output content is as expected assert torch.allclose(y, x + 1) @create_new_process_for_each_test("fork" if not current_platform.is_rocm() else "spawn") @pytest.mark.parametrize( "model", [ # sleep mode with safetensors "hmellor/tiny-random-LlamaForCausalLM", # sleep mode with pytorch checkpoint "facebook/opt-125m", ], ) def test_end_to_end(model: str): free, total = torch.cuda.mem_get_info() used_bytes_baseline = total - free # in case other process is running llm = LLM(model, enable_sleep_mode=True) prompt = "How are you?" sampling_params = SamplingParams(temperature=0, max_tokens=10) output = llm.generate(prompt, sampling_params) # the benefit of `llm.sleep(level=2)` is mainly CPU memory usage, # which is difficult to measure in the test. therefore, we only # test sleep level 1 here. 
llm.sleep(level=1) free_gpu_bytes_after_sleep, total = torch.cuda.mem_get_info() used_bytes = total - free_gpu_bytes_after_sleep - used_bytes_baseline # now the memory usage is mostly cudagraph memory pool, # and it should be less than the model weights (1B model, 2GiB weights) # NOTE: In V1, the memory buffer for logits (max_num_reqs x vocab_size) # is captured but cannot be releasesd from PyTorch due to a known bug, # therefore high memory usage after `llm.sleep` is called is expected. # FIXME(youkaichao & ywang96): Fix memory buffer issue with sleep mode # in V1. assert used_bytes < 7 * GiB_bytes llm.wake_up() output2 = llm.generate(prompt, sampling_params) # cmp output assert output[0].outputs[0].text == output2[0].outputs[0].text llm.sleep(level=1) llm.wake_up(tags=["weights"]) free_gpu_bytes_wake_up_w, total = torch.cuda.mem_get_info() used_bytes = total - free_gpu_bytes_wake_up_w - used_bytes_baseline # should just reallocate memory for weights (1B model, ~2GiB weights) assert used_bytes < 10 * GiB_bytes # now allocate kv cache memory llm.wake_up(tags=["kv_cache"]) output3 = llm.generate(prompt, sampling_params) # cmp output assert output[0].outputs[0].text == output3[0].outputs[0].text @create_new_process_for_each_test() def test_deep_sleep(): model = "hmellor/tiny-random-LlamaForCausalLM" free, total = torch.cuda.mem_get_info() used_bytes_baseline = total - free # in case other process is running llm = LLM(model, enable_sleep_mode=True) prompt = "How are you?" 
sampling_params = SamplingParams(temperature=0, max_tokens=10) output = llm.generate(prompt, sampling_params) # Put the engine to deep sleep llm.sleep(level=2) free_gpu_bytes_after_sleep, total = torch.cuda.mem_get_info() used_bytes = total - free_gpu_bytes_after_sleep - used_bytes_baseline assert used_bytes < 3 * GiB_bytes llm.wake_up(tags=["weights"]) llm.collective_rpc("reload_weights") free_gpu_bytes_wake_up_w, total = torch.cuda.mem_get_info() used_bytes = total - free_gpu_bytes_wake_up_w - used_bytes_baseline assert used_bytes < 4 * GiB_bytes # now allocate kv cache and cuda graph memory llm.wake_up(tags=["kv_cache"]) output2 = llm.generate(prompt, sampling_params) # cmp output assert output[0].outputs[0].text == output2[0].outputs[0].text @create_new_process_for_each_test() def test_deep_sleep_async(): async def test(): model = "hmellor/tiny-random-LlamaForCausalLM" free, total = torch.cuda.mem_get_info() used_bytes_baseline = total - free # in case other process is running engine_args = AsyncEngineArgs( model=model, enable_sleep_mode=True, ) llm = AsyncLLMEngine.from_engine_args(engine_args) prompt = "How are you?" 
sampling_params = SamplingParams(temperature=0, max_tokens=10) outputs = llm.generate(prompt, sampling_params, request_id="test_request_id1") async for output in outputs: pass # Put the engine to deep sleep await llm.sleep(level=2) await llm.wake_up(tags=["weights"]) await llm.collective_rpc("reload_weights") free_gpu_bytes_wake_up_w, total = torch.cuda.mem_get_info() used_bytes = total - free_gpu_bytes_wake_up_w - used_bytes_baseline assert used_bytes < 4 * GiB_bytes # now allocate kv cache and cuda graph memory await llm.wake_up(tags=["kv_cache"]) outputs2 = llm.generate(prompt, sampling_params, request_id="test_request_id2") async for output2 in outputs2: pass # cmp output assert output.outputs[0].text == output2.outputs[0].text asyncio.run(test()) @requires_fp8 def test_deep_sleep_fp8_kvcache(): GiB_bytes = 1 << 30 model = "Qwen/Qwen2-0.5B" used_bytes_baseline = current_platform.get_current_memory_usage() llm = LLM(model, enable_sleep_mode=True, kv_cache_dtype="fp8") prompt = "How are you?" sampling_params = SamplingParams(temperature=0, max_tokens=10) output = llm.generate(prompt, sampling_params) # Put the engine to deep sleep llm.sleep(level=2) used_bytes = current_platform.get_current_memory_usage() - used_bytes_baseline # Rocm uses more memory for CudaGraphs, so we add 2 GiB more for the threshold rocm_extra_mem_bytes = 2 * GiB_bytes if current_platform.is_rocm() else 0 mem_threshold_after_sleep = 3 * GiB_bytes + rocm_extra_mem_bytes assert used_bytes < mem_threshold_after_sleep llm.wake_up(tags=["weights"]) llm.collective_rpc("reload_weights") used_bytes = current_platform.get_current_memory_usage() - used_bytes_baseline mem_threshold_after_wake_up = 4 * GiB_bytes + rocm_extra_mem_bytes assert used_bytes < mem_threshold_after_wake_up # now allocate kv cache and cuda graph memory llm.wake_up(tags=["kv_cache"]) output2 = llm.generate(prompt, sampling_params) # cmp output assert output[0].outputs[0].text == output2[0].outputs[0].text
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/basic_correctness/test_cpu_offload.py
tests/basic_correctness/test_cpu_offload.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from ..utils import compare_two_settings def test_cpu_offload(): compare_two_settings( "hmellor/tiny-random-LlamaForCausalLM", [], ["--cpu-offload-gb", "1"] )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/basic_correctness/__init__.py
tests/basic_correctness/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/basic_correctness/test_basic_correctness.py
tests/basic_correctness/test_basic_correctness.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Compare the short outputs of HF and vLLM when using greedy sampling. Run `pytest tests/basic_correctness/test_basic_correctness.py`. """ import os import weakref from unittest.mock import Mock import pytest import torch from vllm import LLM from vllm.platforms import current_platform from vllm.v1.engine.llm_engine import LLMEngine from ..conftest import HfRunner, VllmRunner from ..models.utils import check_outputs_equal from ..utils import multi_gpu_test ATTN_BACKEND = ["ROCM_ATTN"] if current_platform.is_rocm() else ["FLASH_ATTN"] MODELS = [ "hmellor/tiny-random-Gemma2ForCausalLM", "meta-llama/Llama-3.2-1B-Instruct", ] TARGET_TEST_SUITE = os.environ.get("TARGET_TEST_SUITE", "L4") def test_vllm_gc_ed(): """Verify vllm instance is GC'ed when it is deleted""" llm = LLM("hmellor/tiny-random-LlamaForCausalLM") weak_llm = weakref.ref(llm) del llm # If there's any circular reference to vllm, this fails # because llm instance is not GC'ed. 
assert weak_llm() is None def _fix_prompt_embed_outputs( vllm_outputs: list[tuple[list[int], str]], hf_model: HfRunner, example_prompts: list[str], ) -> list[tuple[list[int], str]]: fixed_vllm_outputs = [] for vllm_output, hf_input, prompt in zip( vllm_outputs, hf_model.get_inputs(example_prompts), example_prompts ): hf_input_ids = hf_input["input_ids"].tolist()[0] fixed_vllm_outputs.append( ( hf_input_ids + vllm_output[0][len(hf_input_ids) :], prompt + vllm_output[1], ) ) return fixed_vllm_outputs @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("backend", ATTN_BACKEND) @pytest.mark.parametrize("max_tokens", [5]) @pytest.mark.parametrize("enforce_eager", [False]) @pytest.mark.parametrize("async_scheduling", [True, False]) @pytest.mark.parametrize("model_executor", ["uni", "mp"]) @pytest.mark.parametrize("enable_prompt_embeds", [True, False]) def test_models( hf_runner, model: str, backend: str, max_tokens: int, enforce_eager: bool, async_scheduling: bool, model_executor: str, enable_prompt_embeds: bool, ) -> None: # 5042 tokens for gemma2 # gemma2 has alternating sliding window size of 4096 # we need a prompt with more than 4096 tokens to test the sliding window prompt = ( "The following numbers of the sequence " + ", ".join(str(i) for i in range(1024)) + " are:" ) example_prompts = [prompt] with hf_runner(model) as hf_model: hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) if enable_prompt_embeds: with torch.no_grad(): prompt_embeds = hf_model.get_prompt_embeddings(example_prompts) with VllmRunner( model, max_model_len=8192, enforce_eager=enforce_eager, enable_prompt_embeds=enable_prompt_embeds, gpu_memory_utilization=0.7, async_scheduling=async_scheduling, distributed_executor_backend=model_executor, attention_config={"backend": backend}, ) as vllm_model: if enable_prompt_embeds: vllm_outputs = vllm_model.generate_greedy(prompt_embeds, max_tokens) vllm_outputs = _fix_prompt_embed_outputs( vllm_outputs, hf_model, 
example_prompts ) else: vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) check_outputs_equal( outputs_0_lst=hf_outputs, outputs_1_lst=vllm_outputs, name_0="hf", name_1="vllm", ) @multi_gpu_test(num_gpus=2) @pytest.mark.parametrize( "model, distributed_executor_backend, attention_backend, test_suite, extra_env", [ ("facebook/opt-125m", "ray", "", "L4", {}), ("facebook/opt-125m", "mp", "", "L4", {}), ("facebook/opt-125m", "ray", "", "L4", {"VLLM_SLEEP_WHEN_IDLE": "1"}), ("facebook/opt-125m", "mp", "", "L4", {"VLLM_SLEEP_WHEN_IDLE": "1"}), ("meta-llama/Llama-3.2-1B-Instruct", "ray", "", "L4", {}), ("meta-llama/Llama-3.2-1B-Instruct", "mp", "", "L4", {}), ("facebook/opt-125m", "ray", "", "A100", {}), ("facebook/opt-125m", "mp", "", "A100", {}), ], ) @pytest.mark.parametrize("enable_prompt_embeds", [True, False]) def test_models_distributed( monkeypatch: pytest.MonkeyPatch, hf_runner, vllm_runner, example_prompts, model: str, distributed_executor_backend: str, attention_backend: str, test_suite: str, extra_env: dict[str, str], enable_prompt_embeds: bool, ) -> None: if test_suite != TARGET_TEST_SUITE: pytest.skip(f"Skip test for {test_suite}") with monkeypatch.context() as monkeypatch_context: if ( model == "meta-llama/Llama-3.2-1B-Instruct" and distributed_executor_backend == "ray" and attention_backend == "" and test_suite == "L4" and enable_prompt_embeds ): # noqa pytest.skip("enable_prompt_embeds does not work with ray compiled dag.") for k, v in extra_env.items(): monkeypatch_context.setenv(k, v) dtype = "half" max_tokens = 5 # NOTE: take care of the order. run vLLM first, and then run HF. # vLLM needs a fresh new process without cuda initialization. # if we run HF first, the cuda initialization will be done and it # will hurt multiprocessing backend with fork method # (the default method). 
attention_config = {"backend": attention_backend} if attention_backend else None with vllm_runner( model, dtype=dtype, tensor_parallel_size=2, distributed_executor_backend=distributed_executor_backend, enable_prompt_embeds=enable_prompt_embeds, gpu_memory_utilization=0.7, attention_config=attention_config, ) as vllm_model: if enable_prompt_embeds: with hf_runner(model, dtype=dtype) as hf_model: with torch.no_grad(): prompt_embeds = hf_model.get_prompt_embeddings(example_prompts) vllm_outputs = vllm_model.generate_greedy(prompt_embeds, max_tokens) vllm_outputs = _fix_prompt_embed_outputs( vllm_outputs, hf_model, example_prompts ) hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) else: vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) with hf_runner(model, dtype=dtype) as hf_model: hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) check_outputs_equal( outputs_0_lst=hf_outputs, outputs_1_lst=vllm_outputs, name_0="hf", name_1="vllm", ) def test_failed_model_execution(vllm_runner, monkeypatch) -> None: # Needed to mock an error in the same process monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0") with vllm_runner("facebook/opt-125m", enforce_eager=True) as vllm_model: if isinstance(vllm_model.llm.llm_engine, LLMEngine): v1_test_failed_model_execution(vllm_model) def v1_test_failed_model_execution(vllm_model): engine = vllm_model.llm.llm_engine mocked_execute_model = Mock(side_effect=RuntimeError("Mocked Critical Error")) engine.engine_core.engine_core.model_executor.execute_model = mocked_execute_model with pytest.raises(RuntimeError) as exc_info: prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] vllm_model.generate_greedy(prompts, 200, use_tqdm=False) assert isinstance(exc_info.value, RuntimeError) assert "Mocked Critical Error" in str(exc_info.value)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/cuda/test_cuda_context.py
tests/cuda/test_cuda_context.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import ctypes from concurrent.futures import ThreadPoolExecutor import pytest import torch from vllm.platforms import current_platform def check_cuda_context(): """Check CUDA driver context status""" try: cuda = ctypes.CDLL("libcuda.so") device = ctypes.c_int() result = cuda.cuCtxGetDevice(ctypes.byref(device)) return (True, device.value) if result == 0 else (False, None) except Exception: return False, None def run_cuda_test_in_thread(device_input, expected_device_id): """Run CUDA context test in separate thread for isolation""" try: # New thread should have no CUDA context initially valid_before, device_before = check_cuda_context() if valid_before: return ( False, "CUDA context should not exist in new thread, " f"got device {device_before}", ) # Test setting CUDA context current_platform.set_device(device_input) # Verify context is created correctly valid_after, device_id = check_cuda_context() if not valid_after: return False, "CUDA context should be valid after set_cuda_context" if device_id != expected_device_id: return False, f"Expected device {expected_device_id}, got {device_id}" return True, "Success" except Exception as e: return False, f"Exception in thread: {str(e)}" class TestSetCudaContext: """Test suite for the set_cuda_context function.""" @pytest.mark.skipif(not current_platform.is_cuda(), reason="CUDA not available") @pytest.mark.parametrize( argnames="device_input,expected_device_id", argvalues=[ (0, 0), (torch.device("cuda:0"), 0), ("cuda:0", 0), ], ids=["int", "torch_device", "string"], ) def test_set_cuda_context_parametrized(self, device_input, expected_device_id): """Test setting CUDA context in isolated threads.""" with ThreadPoolExecutor(max_workers=1) as executor: future = executor.submit( run_cuda_test_in_thread, device_input, expected_device_id ) success, message = future.result(timeout=30) assert success, message 
@pytest.mark.skipif(not current_platform.is_cuda(), reason="CUDA not available") def test_set_cuda_context_invalid_device_type(self): """Test error handling for invalid device type.""" with pytest.raises(ValueError, match="Expected a cuda device"): current_platform.set_device(torch.device("cpu")) if __name__ == "__main__": pytest.main([__file__, "-v"])
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_glm4_moe_tool_parser.py
tests/tool_parsers/test_glm4_moe_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 import json import pytest from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall from vllm.tokenizers import get_tokenizer from vllm.tool_parsers.glm4_moe_tool_parser import ( Glm4MoeModelToolParser, ) pytest.skip("skip glm4_moe parser test", allow_module_level=True) # Use a common model that is likely to be available MODEL = "zai-org/GLM-4.5" @pytest.fixture(scope="module") def glm4_moe_tokenizer(): return get_tokenizer(tokenizer_name=MODEL) @pytest.fixture def glm4_moe_tool_parser(glm4_moe_tokenizer): return Glm4MoeModelToolParser(glm4_moe_tokenizer) def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert isinstance(actual_tool_call.id, str) assert len(actual_tool_call.id) > 0 assert actual_tool_call.type == "function" assert actual_tool_call.function.name == expected_tool_call.function.name # Compare arguments as JSON objects to handle formatting differences actual_args = json.loads(actual_tool_call.function.arguments) expected_args = json.loads(expected_tool_call.function.arguments) assert actual_args == expected_args def test_extract_tool_calls_no_tools(glm4_moe_tool_parser): model_output = "This is a test" extracted_tool_calls = glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "single_tool_call", "multiple_tool_calls", "tool_call_with_content_before", "tool_call_with_mixed_args", "tool_call_with_chinese_content", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( 
"""<tool_call>get_current_weather <arg_key>city</arg_key> <arg_value>Dallas</arg_value> <arg_key>state</arg_key> <arg_value>TX</arg_value> <arg_key>unit</arg_key> <arg_value>fahrenheit</arg_value> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], None, ), ( """<tool_call>get_current_weather <arg_key>city</arg_key> <arg_value>Dallas</arg_value> <arg_key>state</arg_key> <arg_value>TX</arg_value> <arg_key>unit</arg_key> <arg_value>fahrenheit</arg_value> </tool_call> <tool_call>get_current_weather <arg_key>city</arg_key> <arg_value>Orlando</arg_value> <arg_key>state</arg_key> <arg_value>FL</arg_value> <arg_key>unit</arg_key> <arg_value>fahrenheit</arg_value> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Orlando", "state": "FL", "unit": "fahrenheit", } ), ) ), ], None, ), ( """I'll help you check the weather. 
<tool_call>get_current_weather <arg_key>city</arg_key> <arg_value>Seattle</arg_value> <arg_key>state</arg_key> <arg_value>WA</arg_value> <arg_key>unit</arg_key> <arg_value>celsius</arg_value> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Seattle", "state": "WA", "unit": "celsius", } ), ) ) ], "I'll help you check the weather.", ), ( """<tool_call>get_current_weather <arg_key>city</arg_key> <arg_value>New York</arg_value> <arg_key>state</arg_key> <arg_value>NY</arg_value> <arg_key>unit</arg_key> <arg_value>celsius</arg_value> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "New York", "state": "NY", "unit": "celsius", } ), ) ) ], None, ), ( """I will help you get the weather.<tool_call>get_weather <arg_key>city</arg_key> <arg_value>Beijing</arg_value> <arg_key>date</arg_key> <arg_value>2025-08-01</arg_value> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_weather", arguments=json.dumps( { "city": "Beijing", "date": "2025-08-01", } ), ) ) ], "I will help you get the weather.", ), ], ) def test_extract_tool_calls( glm4_moe_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content def test_extract_tool_calls_with_thinking_tags(glm4_moe_tool_parser): """Test tool extraction when thinking tags are present.""" model_output = """<think>I want to get the weather.</think> I will help you get the weather. 
<tool_call>get_weather <arg_key>city</arg_key> <arg_value>Beijing</arg_value> <arg_key>date</arg_key> <arg_value>2025-08-01</arg_value> </tool_call>""" extracted_tool_calls = glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert len(extracted_tool_calls.tool_calls) == 1 assert extracted_tool_calls.tool_calls[0].function.name == "get_weather" expected_content = """<think>I want to get the weather.</think> I will help you get the weather.""" assert extracted_tool_calls.content == expected_content def test_extract_tool_calls_malformed_xml(glm4_moe_tool_parser): """Test that malformed XML is handled gracefully.""" model_output = """<tool_call>get_weather <arg_key>city</arg_key> <arg_value>Seattle</arg_value> <arg_key>incomplete_arg <arg_value>value</arg_value> </tool_call>""" extracted_tool_calls = glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] # Should handle malformed XML gracefully # The parser should either extract what it can or return no tool calls # depending on how robust we want the parsing to be assert isinstance(extracted_tool_calls.tools_called, bool) assert isinstance(extracted_tool_calls.tool_calls, list) def test_extract_tool_calls_empty_arguments(glm4_moe_tool_parser): """Test tool calls with no arguments.""" model_output = """<tool_call>get_current_time </tool_call>""" extracted_tool_calls = glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert len(extracted_tool_calls.tool_calls) == 1 assert extracted_tool_calls.tool_calls[0].function.name == "get_current_time" # Empty arguments should result in empty JSON object assert extracted_tool_calls.tool_calls[0].function.arguments == "{}" def test_extract_tool_calls_mixed_content(glm4_moe_tool_parser): """Test extraction with mixed content and multiple tool calls.""" model_output = """I 
will help you get the weather info. <tool_call>get_weather <arg_key>city</arg_key> <arg_value>Beijing</arg_value> <arg_key>date</arg_key> <arg_value>2025-08-01</arg_value> </tool_call> meaningwhile, I will also check the weather in Shanghai. <tool_call>get_weather <arg_key>city</arg_key> <arg_value>Shanghai</arg_value> <arg_key>date</arg_key> <arg_value>2025-08-01</arg_value> </tool_call>""" extracted_tool_calls = glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert len(extracted_tool_calls.tool_calls) == 2 # Check first tool call assert extracted_tool_calls.tool_calls[0].function.name == "get_weather" args1 = json.loads(extracted_tool_calls.tool_calls[0].function.arguments) assert args1["city"] == "Beijing" assert args1["date"] == "2025-08-01" # Check second tool call assert extracted_tool_calls.tool_calls[1].function.name == "get_weather" args2 = json.loads(extracted_tool_calls.tool_calls[1].function.arguments) assert args2["city"] == "Shanghai" assert args2["date"] == "2025-08-01" # Content should be everything before the first tool call assert extracted_tool_calls.content == "I will help you get the weather info." 
def test_streaming_basic_functionality(glm4_moe_tool_parser): """Test basic streaming functionality.""" # Reset streaming state glm4_moe_tool_parser.current_tool_name_sent = False glm4_moe_tool_parser.prev_tool_call_arr = [] glm4_moe_tool_parser.current_tool_id = -1 glm4_moe_tool_parser.streamed_args_for_tool = [] # Test with a simple tool call current_text = """<tool_call>get_weather <arg_key>city</arg_key> <arg_value>Beijing</arg_value> </tool_call>""" # Mock token IDs for testing tool_call_start_id = glm4_moe_tool_parser.tool_call_start_token_id or 12345 tool_call_end_id = glm4_moe_tool_parser.tool_call_end_token_id or 12346 result = glm4_moe_tool_parser.extract_tool_calls_streaming( previous_text="", current_text=current_text, delta_text="</tool_call>", previous_token_ids=[], current_token_ids=[tool_call_start_id, tool_call_end_id], delta_token_ids=[tool_call_end_id], request=None, ) # The result behavior depends on the streaming state # This test mainly ensures no exceptions are thrown assert result is None or hasattr(result, "tool_calls") or hasattr(result, "content") def test_streaming_no_tool_calls(glm4_moe_tool_parser): """Test streaming when there are no tool calls.""" current_text = "This is just regular text without any tool calls." result = glm4_moe_tool_parser.extract_tool_calls_streaming( previous_text="This is just regular text", current_text=current_text, delta_text=" without any tool calls.", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # Should return the delta text as content assert result is not None assert hasattr(result, "content") assert result.content == " without any tool calls." 
def test_streaming_with_content_before_tool_calls(glm4_moe_tool_parser): """Test streaming when there's content before tool calls.""" # Reset streaming state glm4_moe_tool_parser.current_tool_name_sent = False glm4_moe_tool_parser.prev_tool_call_arr = [] glm4_moe_tool_parser.current_tool_id = -1 glm4_moe_tool_parser.streamed_args_for_tool = [] current_text = "I will help you get the weather<tool_call>" result = glm4_moe_tool_parser.extract_tool_calls_streaming( previous_text="I will help you", current_text=current_text, delta_text="get the weather.<tool_call>", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # Should return content when no tool call tokens are detected assert result is not None assert hasattr(result, "content") assert result.content == "get the weather.<tool_call>" def test_extract_tool_calls_special_characters(glm4_moe_tool_parser): """Test tool calls with special characters and unicode.""" model_output = """<tool_call>send_message <arg_key>recipient</arg_key> <arg_value>Amy</arg_value> <arg_key>message</arg_key> <arg_value>It is a nice day</arg_value> <arg_key>priority</arg_key> <arg_value>high</arg_value> </tool_call>""" extracted_tool_calls = glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert len(extracted_tool_calls.tool_calls) == 1 assert extracted_tool_calls.tool_calls[0].function.name == "send_message" args = json.loads(extracted_tool_calls.tool_calls[0].function.arguments) assert args["recipient"] == "Amy" assert args["message"] == "It is a nice day" assert args["priority"] == "high" def test_extract_tool_calls_incomplete_tool_call(glm4_moe_tool_parser): """Test incomplete tool calls (missing closing tag).""" model_output = """<tool_call>get_weather <arg_key>city</arg_key> <arg_value>Beijing</arg_value> <arg_key>date</arg_key> <arg_value>2025-08-01</arg_value>""" extracted_tool_calls = 
glm4_moe_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] # Incomplete tool calls should not be extracted assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_mistral_tool_parser.py
tests/tool_parsers/test_mistral_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json from collections.abc import Generator import partial_json_parser import pytest from mistral_common.protocol.instruct.messages import AssistantMessage from mistral_common.protocol.instruct.request import InstructRequest from mistral_common.protocol.instruct.tool_calls import FunctionCall, ToolCall from partial_json_parser.core.options import Allow from vllm.entrypoints.openai.protocol import DeltaMessage, DeltaToolCall from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally from vllm.tokenizers.mistral import MistralTokenizer from vllm.tool_parsers.mistral_tool_parser import MistralToolParser @pytest.fixture(scope="module") def mistral_pre_v11_tokenizer(): MODEL = "mistralai/Mistral-7B-Instruct-v0.3" return get_tokenizer(tokenizer_name=MODEL) @pytest.fixture(scope="module") def mistral_tokenizer(): MODEL = "mistralai/Mistral-Small-3.2-24B-Instruct-2506" return get_tokenizer(tokenizer_name=MODEL, tokenizer_mode="mistral") @pytest.fixture def mistral_pre_v11_tool_parser(mistral_pre_v11_tokenizer): return MistralToolParser(mistral_pre_v11_tokenizer) @pytest.fixture def mistral_tool_parser(mistral_tokenizer): return MistralToolParser(mistral_tokenizer) def assert_tool_calls( actual_tool_calls: list[ToolCall] | list[DeltaToolCall], expected_tool_calls: list[ToolCall], ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert isinstance(actual_tool_call.id, str) assert len(actual_tool_call.id) == 9 if isinstance(actual_tool_call, ToolCall): assert actual_tool_call.type == "function" elif isinstance(actual_tool_call, DeltaToolCall): assert actual_tool_call.function is not None assert actual_tool_call.function.name is not None assert actual_tool_call.function.arguments is not None assert 
actual_tool_call.function is not None assert actual_tool_call.function.name == expected_tool_call.function.name, ( f"got wrong function name:${actual_tool_call.function.name}" ) assert ( actual_tool_call.function.arguments == expected_tool_call.function.arguments ), f"got wrong function argument:${actual_tool_call.function.arguments}" def fix_tool_call_tokenization( tokens: list[int], mistral_tool_parser: MistralToolParser, mistral_tokenizer: TokenizerLike, ): """ Replaces the textual token sequence for [TOOL_CALLS] with its single special token ID. """ textual_tool_call_token_ids = mistral_tokenizer.encode( text=mistral_tool_parser.bot_token, add_special_tokens=False, ) # textual_tool_call_token_ids must not contain special tokens like bos, eos etc special_tool_call_token_ids = [mistral_tool_parser.bot_token_id] # If the input is too short to contain the sequence, no replacement is possible if not tokens or len(tokens) < len(textual_tool_call_token_ids): return tokens result_tokens = [] i = 0 target_len = len(textual_tool_call_token_ids) while i < len(tokens): # Check if the slice from the current position matches the target sequence if tokens[i : i + target_len] == textual_tool_call_token_ids: # If it matches, add the replacement and jump the index forward result_tokens.extend(special_tool_call_token_ids) i += target_len else: # Otherwise, just add the current token and move to the next one result_tokens.append(tokens[i]) i += 1 return result_tokens def stream_delta_message_generator( mistral_tool_parser: MistralToolParser, mistral_tokenizer: TokenizerLike, model_output: str | None, tools: list[tuple[str, str]] | None, ) -> Generator[DeltaMessage, None, None]: if ( isinstance(mistral_tokenizer, MistralTokenizer) and mistral_tokenizer.version >= 11 ): # With the newer versions of the tokenizer, # we cannot tokenize free text # so we need to create a list of messages to get tokenized assert tools is not None assistant_msg = AssistantMessage( tool_calls=[ ToolCall( 
function=FunctionCall( name=name, arguments=arg, ) ) for (name, arg) in tools ], ) request = InstructRequest( messages=[assistant_msg], ) all_token_ids = mistral_tokenizer.instruct.encode_instruct(request).tokens else: # Older versions of the tokenizer are # able to encode directly the model's output (free text) into tokens assert model_output is not None all_token_ids = mistral_tokenizer.encode(model_output, add_special_tokens=False) all_token_ids = fix_tool_call_tokenization( all_token_ids, mistral_tool_parser, mistral_tokenizer ) previous_text = "" previous_tokens = None prefix_offset = 0 read_offset = 0 for i, delta_token in enumerate(all_token_ids): delta_token_ids = [delta_token] previous_token_ids = all_token_ids[:i] current_token_ids = all_token_ids[: i + 1] (new_tokens, delta_text, new_prefix_offset, new_read_offset) = ( detokenize_incrementally( tokenizer=mistral_tokenizer, all_input_ids=current_token_ids, prev_tokens=previous_tokens, prefix_offset=prefix_offset, read_offset=read_offset, skip_special_tokens=isinstance(mistral_tokenizer, MistralTokenizer), spaces_between_special_tokens=True, ) ) current_text = previous_text + delta_text delta_message = mistral_tool_parser.extract_tool_calls_streaming( previous_text, current_text, delta_text, previous_token_ids, current_token_ids, delta_token_ids, request=None, # type: ignore[arg-type] ) if delta_message: yield delta_message previous_text = current_text previous_tokens = ( previous_tokens + new_tokens if previous_tokens else new_tokens ) prefix_offset = new_prefix_offset read_offset = new_read_offset def test_extract_tool_calls_no_tools(mistral_pre_v11_tool_parser): model_output = "This is a test" extracted_tool_calls = mistral_pre_v11_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ 
"single_tool_add", "single_tool_weather", "argument_before_name", "argument_before_name_and_name_in_argument", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """[TOOL_CALLS][{"name": "add", "arguments":{"a": 3.5, "b": 4}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3.5, "b": 4}) ) ) ], None, ), ( """[TOOL_CALLS] [{"name": "get_current_weather", "arguments":{"city": "San Francisco", "state": "CA", "unit": "celsius"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], None, ), ( """[TOOL_CALLS] [{"arguments":{"city": "San Francisco", "state": "CA", "unit": "celsius"}, "name": "get_current_weather"}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], None, ), ( """[TOOL_CALLS] [{"arguments":{"name": "John Doe"}, "name": "get_age"}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_age", arguments=json.dumps( { "name": "John Doe", } ), ) ) ], None, ), ], ) def test_extract_tool_calls_pre_v11_tokenizer( mistral_pre_v11_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = mistral_pre_v11_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content @pytest.mark.parametrize( ids=[ "single_tool_add", "single_tool_weather", "multiple_tool_calls", "complex", "wrong_json", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """[TOOL_CALLS]add_this_and_that{"a": 3.5, "b": 4}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add_this_and_that", arguments=json.dumps({"a": 
3.5, "b": 4}), ) ) ], None, ), ( """[TOOL_CALLS]get_current_weather{"city": "San Francisco", "state": "CA", "unit": "celsius"}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], None, ), ( """[TOOL_CALLS]add{"a": 3.5, "b": 4}[TOOL_CALLS]multiply{"a": 3, "b": 6}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3.5, "b": 4}) ) ), ToolCall( function=FunctionCall( name="multiply", arguments=json.dumps({"a": 3, "b": 6}) ) ), ], None, ), ( # Complex """hi{hi[TOOL_CALLS]bash{"command": "print(\\"hello world!\\")\\nre.compile(r\'{}\')""", # noqa: E501 [ ToolCall( function=FunctionCall( name="bash", arguments=json.dumps( {"command": "print(\"hello world!\")\nre.compile(r'{}')"} )[:-2], ) ) ], "hi{hi", ), ( # Wrong json """hi{hi[TOOL_CALLS]bash{"command": "print(\\"hello world!\\")\\nre.compile(r\'{}\')"}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="bash", arguments=json.dumps( {"command": "print(\"hello world!\")\nre.compile(r'{}')"} ), ) ) ], "hi{hi", ), ], ) def test_extract_tool_calls( mistral_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = mistral_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content def _test_extract_tool_calls_streaming( tool_parser, tokenizer, model_output, tools, expected_tool_calls, expected_content ): other_content: str = "" function_names: list[str] = [] function_args_strs: list[str] = [] tool_call_idx: int = -1 tool_call_ids: list[str | None] = [] for delta_message in stream_delta_message_generator( tool_parser, tokenizer, model_output, tools ): # role should never be streamed from tool parser assert not delta_message.role if 
delta_message.content: other_content += delta_message.content streamed_tool_calls = delta_message.tool_calls if streamed_tool_calls and len(streamed_tool_calls) > 0: # make sure only one diff is present - correct even for parallel assert len(streamed_tool_calls) == 1 tool_call = streamed_tool_calls[0] assert len(tool_parser.prev_tool_call_arr) > 0 # if a new tool is being called, set up empty arguments if tool_call.index != tool_call_idx: tool_call_idx = tool_call.index function_args_strs.append("") tool_call_ids.append(None) # if a tool call ID is streamed, make sure one hasn't been already if tool_call.id and not tool_call_ids[tool_call.index]: tool_call_ids[tool_call.index] = tool_call.id # if parts of the function start being streamed if tool_call.function: # if the function name is defined, set it. it should be streamed # IN ENTIRETY, exactly one time. if tool_call.function.name: assert isinstance(tool_call.function.name, str) function_names.append(tool_call.function.name) if tool_call.function.arguments: # make sure they're a string and then add them to the list assert isinstance(tool_call.function.arguments, str) function_args_strs[tool_call.index] += tool_call.function.arguments assert other_content == expected_content actual_tool_calls = [ ToolCall( id=tool_call_id, function=FunctionCall( name=function_name, arguments=partial_json_parser.ensure_json( function_args_str, Allow.OBJ | Allow.STR ), ), ) for tool_call_id, function_name, function_args_str in zip( tool_call_ids, function_names, function_args_strs ) ] assert_tool_calls(actual_tool_calls, expected_tool_calls) @pytest.mark.parametrize( ids=[ "no_tools", "single_tool_add", "single_tool_add_strings", "single_tool_weather", "argument_before_name", "argument_before_name_and_name_in_argument", "multiple_tools", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ("""This is a test""", [], """This is a test"""), ( """[TOOL_CALLS] [ {"name":"add" , "arguments" : {"a": 3, 
"b": 4} } ]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3, "b": 4}) ) ) ], "", ), ( """[TOOL_CALLS] [{"name": "add", "arguments":{"a": "3", "b": "4"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": "3", "b": "4"}) ) ) ], "", ), ( """[TOOL_CALLS] [{"name": "get_current_weather", "arguments": {"city": "San Francisco", "state": "CA", "unit": "celsius"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], "", ), ( """[TOOL_CALLS] [{"arguments": {"city": "San Francisco", "state": "CA", "unit": "celsius"}, "name": "get_current_weather"}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], "", ), ( """[TOOL_CALLS] [{"arguments": {"name": "John Doe"}, "name": "get_age"}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_age", arguments=json.dumps( { "name": "John Doe", } ), ) ) ], "", ), ( """[TOOL_CALLS] [{"name": "add", "arguments": {"a": 3.5, "b": 4}}, {"name": "get_current_weather", "arguments":{"city": "San Francisco", "state": "CA", "unit": "celsius"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3.5, "b": 4}) ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ), ], "", ), ], ) def test_extract_tool_calls_streaming_pre_v11_tokenizer( mistral_pre_v11_tool_parser, mistral_pre_v11_tokenizer, model_output, expected_tool_calls, expected_content, ): _test_extract_tool_calls_streaming( mistral_pre_v11_tool_parser, mistral_pre_v11_tokenizer, model_output, None, expected_tool_calls, expected_content, ) @pytest.mark.parametrize( ids=[ "single_tool_add", "single_tool_add_strings", 
"multiple_tools", ], argnames=["tools", "expected_tool_calls", "expected_content"], argvalues=[ ( [("add", '{"a": 3, "b": 4}')], # [TOOL_CALLS]add{"a": 3, "b": 4} [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3, "b": 4}) ) ) ], "", ), ( [("add_two_strings", '{"a": "3", "b": "4"}')], # [TOOL_CALLS]add_two_strings{"a": "3", "b": "4"} [ ToolCall( function=FunctionCall( name="add_two_strings", arguments=json.dumps({"a": "3", "b": "4"}), ) ) ], "", ), ( [ ("add", '{"a": 3.5, "b": 4}'), ( "get_current_weather", '{"city": "San Francisco", "state": "CA", "unit": "celsius"}', # noqa: E501 ), ], # [TOOL_CALLS]add{"a": 3.5, "b": 4}[TOOL_CALLS]get_current_weather{"city": "San Francisco", "state": "CA", "unit": "celsius"} # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3.5, "b": 4}) ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ), ], "", ), ], ) def test_extract_tool_calls_streaming( mistral_tool_parser, mistral_tokenizer, tools, expected_tool_calls, expected_content, ): _test_extract_tool_calls_streaming( mistral_tool_parser, mistral_tokenizer, None, tools, expected_tool_calls, expected_content, ) @pytest.mark.parametrize( ids=[ "single_tool_add", "single_tool_weather", "multiple_tool_calls", "content_before_tool", "complex", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """[TOOL_CALLS]add_this_and_that{"a": 3.5, "b": 4}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add_this_and_that", arguments=json.dumps({"a": 3.5, "b": 4}), ) ) ], "", ), ( """[TOOL_CALLS]get_current_weather{"city": "San Francisco", "state": "CA", "unit": "celsius"}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], "", ), ( """[TOOL_CALLS]add{"a": 3.5, "b": 
4}[TOOL_CALLS]multiply{"a": 3, "b": 6}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3.5, "b": 4}) ) ), ToolCall( function=FunctionCall( name="multiply", arguments=json.dumps({"a": 3, "b": 6}) ) ), ], "", ), ( # Additional content should not be after the tool calls """bla[TOOL_CALLS]add_this_and_that{"a": 3.5, "b": 4}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add_this_and_that", arguments=json.dumps({"a": 3.5, "b": 4}), ) ) ], "bla", ), ( # Complex """hi{hi[TOOL_CALLS]bash{"command": "print(\\"hello world!\\")\\nre.compile(r\'{}\')"}""", # noqa: E501 [ ToolCall( function=FunctionCall( name="bash", arguments=json.dumps( {"command": "print(\"hello world!\")\nre.compile(r'{}')"} ), ) ) ], "hi{hi", ), ], ) def test_extract_tool_calls_streaming_one_chunk( mistral_tool_parser, mistral_tokenizer, model_output, expected_tool_calls, expected_content, ): if isinstance(mistral_tokenizer, MistralTokenizer): all_token_ids = mistral_tokenizer.encode(model_output) else: all_token_ids = mistral_tokenizer.encode(model_output, add_special_tokens=False) all_token_ids = fix_tool_call_tokenization( all_token_ids, mistral_tool_parser, mistral_tokenizer ) delta_message = mistral_tool_parser.extract_tool_calls_streaming( previous_text="", current_text=model_output, delta_text=model_output, previous_token_ids=[], current_token_ids=all_token_ids, delta_token_ids=all_token_ids, request=None, ) # type: ignore[arg-type] assert isinstance(delta_message, DeltaMessage) assert len(delta_message.tool_calls) == len(expected_tool_calls) assert_tool_calls(delta_message.tool_calls, expected_tool_calls) if delta_message.content is None: assert expected_content == "" else: assert delta_message.content == expected_content @pytest.mark.parametrize( ids=[ "no_tools", "single_tool_add", "single_tool_add_strings", "single_tool_weather", "argument_before_name", "argument_before_name_and_name_in_argument", "multiple_tools", ], 
argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ("""This is a test""", [], """This is a test"""), ( """[TOOL_CALLS] [ {"name":"add" , "arguments" : {"a": 3, "b": 4} } ]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3, "b": 4}) ) ) ], "", ), ( """[TOOL_CALLS] [{"name": "add", "arguments":{"a": "3", "b": "4"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": "3", "b": "4"}) ) ) ], "", ), ( """[TOOL_CALLS] [{"name": "get_current_weather", "arguments": {"city": "San Francisco", "state": "CA", "unit": "celsius"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], "", ), ( """[TOOL_CALLS] [{"arguments": {"city": "San Francisco", "state": "CA", "unit": "celsius"}, "name": "get_current_weather"}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ) ], "", ), ( """[TOOL_CALLS] [{"arguments": {"name": "John Doe"}, "name": "get_age"}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_age", arguments=json.dumps( { "name": "John Doe", } ), ) ) ], "", ), ( """[TOOL_CALLS] [{"arguments": {"a": 3.5, "b": 4}, "name": "add"}, {"arguments":{"city": "San Francisco", "state": "CA", "unit": "celsius"}, "name": "get_current_weather"}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="add", arguments=json.dumps({"a": 3.5, "b": 4}) ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "San Francisco", "state": "CA", "unit": "celsius"} ), ) ), ], "", ), ], ) def test_extract_tool_calls_streaming_pre_v11_tokenizer_one_chunk( mistral_pre_v11_tool_parser, mistral_pre_v11_tokenizer, model_output, expected_tool_calls, expected_content, ): if isinstance(mistral_pre_v11_tokenizer, 
MistralTokenizer): all_token_ids = mistral_pre_v11_tokenizer.encode(model_output) else: all_token_ids = mistral_pre_v11_tokenizer.encode( model_output, add_special_tokens=False ) all_token_ids = fix_tool_call_tokenization( all_token_ids, mistral_pre_v11_tool_parser, mistral_pre_v11_tokenizer ) delta_message = mistral_pre_v11_tool_parser.extract_tool_calls_streaming( previous_text="", current_text=model_output, delta_text=model_output, previous_token_ids=[], current_token_ids=all_token_ids, delta_token_ids=all_token_ids, request=None, ) # type: ignore[arg-type] assert isinstance(delta_message, DeltaMessage) assert len(delta_message.tool_calls) == len(expected_tool_calls) assert_tool_calls(delta_message.tool_calls, expected_tool_calls) if delta_message.content is None: assert expected_content == "" else: assert delta_message.content == expected_content
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_xlam_tool_parser.py
tests/tool_parsers/test_xlam_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json from collections.abc import Generator import pytest from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, DeltaMessage, FunctionCall, ToolCall, ) from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally from vllm.tool_parsers.xlam_tool_parser import xLAMToolParser # Use a common model that is likely to be available MODEL = "Salesforce/Llama-xLAM-2-8B-fc-r" @pytest.fixture(scope="module") def xlam_tokenizer(): return get_tokenizer(tokenizer_name=MODEL) @pytest.fixture def xlam_tool_parser(xlam_tokenizer): return xLAMToolParser(xlam_tokenizer) def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert isinstance(actual_tool_call.id, str) assert len(actual_tool_call.id) > 16 assert actual_tool_call.type == "function" assert actual_tool_call.function == expected_tool_call.function def stream_delta_message_generator( xlam_tool_parser: xLAMToolParser, xlam_tokenizer: TokenizerLike, model_output: str, request: ChatCompletionRequest | None = None, ) -> Generator[DeltaMessage, None, None]: all_token_ids = xlam_tokenizer.encode(model_output, add_special_tokens=False) previous_text = "" previous_tokens = None prefix_offset = 0 read_offset = 0 for i, delta_token in enumerate(all_token_ids): delta_token_ids = [delta_token] previous_token_ids = all_token_ids[:i] current_token_ids = all_token_ids[: i + 1] (new_tokens, delta_text, new_prefix_offset, new_read_offset) = ( detokenize_incrementally( tokenizer=xlam_tokenizer, all_input_ids=current_token_ids, prev_tokens=previous_tokens, prefix_offset=prefix_offset, read_offset=read_offset, skip_special_tokens=False, 
spaces_between_special_tokens=True, ) ) current_text = previous_text + delta_text delta_message = xlam_tool_parser.extract_tool_calls_streaming( previous_text, current_text, delta_text, previous_token_ids, current_token_ids, delta_token_ids, request=request, ) if delta_message: yield delta_message previous_text = current_text previous_tokens = ( previous_tokens + new_tokens if previous_tokens else new_tokens ) prefix_offset = new_prefix_offset read_offset = new_read_offset def test_extract_tool_calls_no_tools(xlam_tool_parser): model_output = "This is a test" extracted_tool_calls = xlam_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "parallel_tool_calls", "single_tool_with_think_tag", "single_tool_with_json_code_block", "single_tool_with_tool_calls_tag", "single_tool_with_tool_call_xml_tags", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}, {"name": "get_current_weather", "arguments": {"city": "Orlando", "state": "FL", "unit": "fahrenheit"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Orlando", "state": "FL", "unit": "fahrenheit", } ), ) ), ], None, ), ( """<think>I'll help you with that.</think>[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "<think>I'll help you with 
that.</think>", ), ( """I'll help you with that.\n```json\n[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]\n```""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "I'll help you with that.", ), ( """I'll check the weather for you.[TOOL_CALLS][{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "I'll check the weather for you.", ), ( """I'll help you check the weather.<tool_call>[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]</tool_call>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "I'll help you check the weather.", ), ], ) def test_extract_tool_calls( xlam_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = xlam_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content @pytest.mark.parametrize( ids=["list_structured_tool_call"], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """[{"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA", "unit": "celsius"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Seattle", "state": "WA", "unit": "celsius", } ), ) ) ], None, ), ], ) def test_extract_tool_calls_list_structure( xlam_tool_parser, 
model_output, expected_tool_calls, expected_content ): """Test extraction of tool calls when the model outputs a list-structured tool call.""" # noqa: E501 extracted_tool_calls = xlam_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content # Test for preprocess_model_output method def test_preprocess_model_output(xlam_tool_parser): # Test with list structure model_output = ( """[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]""" # noqa: E501 ) content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( model_output ) assert content is None assert potential_tool_calls == model_output # Test with thinking tag model_output = """<think>I'll help you with that.</think>[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]""" # noqa: E501 content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( model_output ) assert content == "<think>I'll help you with that.</think>" assert ( potential_tool_calls == '[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]' ) # Test with JSON code block model_output = """I'll help you with that. ```json [{"name": "get_current_weather", "arguments": {"city": "Seattle"}}] ```""" content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( model_output ) assert content == "I'll help you with that." 
assert "get_current_weather" in potential_tool_calls # Test with no tool calls model_output = """I'll help you with that.""" content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( model_output ) assert content == model_output assert potential_tool_calls is None # Simulate streaming to test extract_tool_calls_streaming def test_streaming_with_list_structure(xlam_tool_parser): # Reset streaming state xlam_tool_parser.prev_tool_calls = [] xlam_tool_parser.current_tools_sent = [] xlam_tool_parser.streamed_args = [] xlam_tool_parser.current_tool_id = -1 # Simulate receiving a message with list structure current_text = ( """[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]""" # noqa: E501 ) # First call to set up the tool xlam_tool_parser.extract_tool_calls_streaming( previous_text="", current_text=current_text, delta_text="]", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # Make sure the tool is set up correctly assert xlam_tool_parser.current_tool_id >= 0, "Tool index should be initialized" # Manually set up the state for sending the tool name xlam_tool_parser.current_tools_sent = [False] # Call to send the function name result = xlam_tool_parser.extract_tool_calls_streaming( previous_text=current_text, current_text=current_text, delta_text="", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # Check that we get a result with the proper tool call if result is not None: assert hasattr(result, "tool_calls") assert len(result.tool_calls) == 1 assert result.tool_calls[0].function.name == "get_current_weather" @pytest.mark.parametrize( ids=[ "parallel_tool_calls", "single_tool_with_think_tag", "single_tool_with_json_code_block", "single_tool_with_tool_calls_tag", "single_tool_with_tool_call_xml_tags", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", 
"unit": "fahrenheit"}}, {"name": "get_current_weather", "arguments": {"city": "Orlando", "state": "FL", "unit": "fahrenheit"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Orlando", "state": "FL", "unit": "fahrenheit", } ), ) ), ], "", ), ( """<think>I'll help you with that.</think>[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "<think>I'll help you with that.</think>", ), ( """```json\n[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]\n```""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "", ), ( """[TOOL_CALLS][{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "", ), ( """I can help with that.<tool_call>[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]</tool_call>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], "I can help with that.", ), ], ) def test_extract_tool_calls_streaming_incremental( xlam_tool_parser, xlam_tokenizer, model_output, expected_tool_calls, expected_content, ): """Verify the XLAM Parser streaming behavior by verifying each chunk is as 
expected.""" # noqa: E501 request = ChatCompletionRequest(model=MODEL, messages=[], tools=[]) chunks = [] for delta_message in stream_delta_message_generator( xlam_tool_parser, xlam_tokenizer, model_output, request ): chunks.append(delta_message) # Should have multiple chunks assert len(chunks) >= 3 # Should have a chunk with tool header (id, name, type) for the first tool call # noqa: E501 header_found = False expected_first_tool = expected_tool_calls[0] for chunk in chunks: if chunk.tool_calls and chunk.tool_calls[0].id: header_found = True assert ( chunk.tool_calls[0].function.name == expected_first_tool.function.name ) assert chunk.tool_calls[0].type == "function" # Arguments may be empty initially or None if chunk.tool_calls[0].function.arguments is not None: # If present, should be empty string initially assert chunk.tool_calls[0].function.arguments == "" break assert header_found # Should have chunks with incremental arguments arg_chunks = [] for chunk in chunks: if ( chunk.tool_calls and chunk.tool_calls[0].function.arguments and chunk.tool_calls[0].function.arguments != "" and chunk.tool_calls[0].index == 0 # Only collect arguments from the first tool call ): arg_chunks.append(chunk.tool_calls[0].function.arguments) # Arguments should be streamed incrementally assert len(arg_chunks) > 1 # Concatenated arguments should form valid JSON for the first tool call full_args = "".join(arg_chunks) parsed_args = json.loads(full_args) expected_args = json.loads(expected_first_tool.function.arguments) assert parsed_args == expected_args
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_seed_oss_tool_parser.py
tests/tool_parsers/test_seed_oss_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 import json from collections.abc import Generator import pytest from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, ChatCompletionToolsParam, DeltaMessage, FunctionCall, ToolCall, ) from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally from vllm.tool_parsers.seed_oss_tool_parser import SeedOssToolParser # Use a common model that is likely to be available MODEL = "ByteDance-Seed/Seed-OSS-36B-Instruct" @pytest.fixture(scope="module") def seed_oss_tokenizer(): return get_tokenizer(tokenizer_name=MODEL, trust_remote_code=True) @pytest.fixture def seed_oss_tool_parser(seed_oss_tokenizer): return SeedOssToolParser(seed_oss_tokenizer) @pytest.fixture def sample_tools(): return [ ChatCompletionToolsParam( type="function", function={ "name": "get_weather", "description": "Get current temperature for a given location.", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "City and country e.g. 
Bogotá, Colombia", }, "unit": { "type": "string", "description": "this is the unit of temperature", }, }, "required": ["location"], "additionalProperties": False, }, "returns": { "type": "object", "properties": { "temperature": { "type": "number", "description": "temperature in celsius", } }, "required": ["temperature"], "additionalProperties": False, }, "strict": True, }, ), ] def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): # Seed-OSS tool call will not generate id assert actual_tool_call.type == "function" assert actual_tool_call.function == expected_tool_call.function assert actual_tool_call.function.name == expected_tool_call.function.name assert ( actual_tool_call.function.arguments == expected_tool_call.function.arguments ) def test_extract_tool_calls_no_tools(seed_oss_tool_parser): model_output = "This is a test response without any tool calls" extracted_tool_calls = seed_oss_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "tool_call_0_thinking_budget", "tool_call_512_thinkg_budget", "tool_call_unlimited_thinking_budget", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """<seed:tool_call>\n<function=get_weather>\n""" """<parameter=location>Barcelona, Spain</parameter>\n</function>\n</seed:tool_call>""", [ ToolCall( function=FunctionCall( name="get_weather", arguments=json.dumps( { "location": "Barcelona, Spain", }, ), ), type="function", ) ], None, ), ( """<seed:think>The user\'s current thinking budget is 512.</seed:cot_budget_reflect>\nLet me analyze the """ """question. 
The user wants to know the weather in Barcelona, Spain. Looking at the functions available, """ """there\'s a get_weather function that can retrieve the current temperature for a given location. \n\nFirst, """ """check the parameters required by get_weather: location is mandatory (needs city and country), and unit is """ """optional. The user provided "Barcelona Spain" as the location, which fits the required format (city, """ """country). \n<seed:cot_budget_reflect>I have used 131 tokens, and there are 381 tokens remaining for use.""" """</seed:cot_budget_reflect>\n Since the unit isn\'t specified, the function will default to Celsius, which """ """is fine. \n\nThere\'s no need to ask for more information because the location is clear. So I should call """ """the get_weather function with location set to "Barcelona, Spain" (adding a comma for clarity, though the """ """user\'s input has a space, but the function might accept either; to be safe, using the standard format """ """with a comma).\n<seed:cot_budget_reflect>I have used 257 tokens, and there are 255 tokens remaining for """ """use.</seed:cot_budget_reflect>\n The unit parameter can be omitted since it\'s optional.</seed:think>\n""" """<seed:tool_call>\n<function=get_weather>\n<parameter=location>Barcelona, Spain</parameter>\n</function>""" """\n</seed:tool_call>""", [ ToolCall( function=FunctionCall( name="get_weather", arguments=json.dumps( { "location": "Barcelona, Spain", }, ), ), type="function", ) ], """<seed:think>The user\'s current thinking budget is 512.</seed:cot_budget_reflect>\nLet me analyze the """ """question. The user wants to know the weather in Barcelona, Spain. Looking at the functions available, """ """there\'s a get_weather function that can retrieve the current temperature for a given location. \n\nFirst, """ """check the parameters required by get_weather: location is mandatory (needs city and country), and unit is """ """optional. 
The user provided "Barcelona Spain" as the location, which fits the required format (city, """ """country). \n<seed:cot_budget_reflect>I have used 131 tokens, and there are 381 tokens remaining for use.""" """</seed:cot_budget_reflect>\n Since the unit isn\'t specified, the function will default to Celsius, which """ """is fine. \n\nThere\'s no need to ask for more information because the location is clear. So I should call """ """the get_weather function with location set to "Barcelona, Spain" (adding a comma for clarity, though the """ """user\'s input has a space, but the function might accept either; to be safe, using the standard format """ """with a comma).\n<seed:cot_budget_reflect>I have used 257 tokens, and there are 255 tokens remaining for """ """use.</seed:cot_budget_reflect>\n The unit parameter can be omitted since it\'s optional.</seed:think>\n""", ), ( """<seed:think>\nGot it, let\'s see. The user asked for the weather in Barcelona, Spain. """ """First, I need to remember the function I can use: get_weather. The function requires a """ """location (city and country) which is "Barcelona, Spain" here, and unit is optional. Since """ """the user didn\'t specify the unit, the default in the function is Celsius, right? Wait, """ """let me check the function docstring again. Oh, the function says unit is optional, and """ """returns temperature in Celsius. So I should call get_weather with location "Barcelona, """ """Spain" and maybe omit unit or set to Celsius. Let me format the function call correctly. """ """The format is <seed:tool_call>\n<function=get_weather>\n<parameter=location>Barcelona, """ """Spain</parameter>\n<parameter=unit>celsius</parameter>\n</function>\n</seed:tool_call>. """ """Wait, but does the unit parameter accept "celsius"? The docstring says unit is the unit """ """of temperature, but the return is in Celsius anyway. Maybe even if I don\'t pass unit, """ """it\'s okay, but to be explicit, maybe pass "celsius". Let me go with that. 
So the function """ """call should be as above. Then wait for the result to come back and tell the user the """ """temperature in Celsius.</seed:think><seed:tool_call>\n<function=get_weather>\n<parameter=location>""" """Barcelona, Spain</parameter>\n<parameter=unit>celsius</parameter>\n</function>\n</seed:tool_call>""", [ ToolCall( function=FunctionCall( name="get_weather", arguments=json.dumps( { "location": "Barcelona, Spain", "unit": "celsius", }, ), ), type="function", ) ], """<seed:think>\nGot it, let\'s see. The user asked for the weather in Barcelona, Spain. """ """First, I need to remember the function I can use: get_weather. The function requires a """ """location (city and country) which is "Barcelona, Spain" here, and unit is optional. Since """ """the user didn\'t specify the unit, the default in the function is Celsius, right? Wait, """ """let me check the function docstring again. Oh, the function says unit is optional, and """ """returns temperature in Celsius. So I should call get_weather with location "Barcelona, """ """Spain" and maybe omit unit or set to Celsius. Let me format the function call correctly. """ """The format is <seed:tool_call>\n<function=get_weather>\n<parameter=location>Barcelona, """ """Spain</parameter>\n<parameter=unit>celsius</parameter>\n</function>\n</seed:tool_call>. """ """Wait, but does the unit parameter accept "celsius"? The docstring says unit is the unit """ """of temperature, but the return is in Celsius anyway. Maybe even if I don\'t pass unit, """ """it\'s okay, but to be explicit, maybe pass "celsius". Let me go with that. So the function """ """call should be as above. 
Then wait for the result to come back and tell the user the """ """temperature in Celsius.</seed:think>""", ), ], ) def test_extract_tool_calls( seed_oss_tool_parser, sample_tools, model_output, expected_tool_calls, expected_content, ): request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) extracted_tool_calls = seed_oss_tool_parser.extract_tool_calls( model_output, request=request ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content def test_streaming_tool_calls_no_tools(seed_oss_tool_parser): model_output = "This is a test response without any tool calls" result = seed_oss_tool_parser.extract_tool_calls_streaming( previous_text="his is a test response", current_text=model_output, delta_text=" without any tool calls.", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # Should return the delta text as content assert result is not None assert hasattr(result, "content") assert result.content == " without any tool calls." 
def stream_delta_message_generator( seed_oss_tool_parser: SeedOssToolParser, seed_oss_tokenizer: TokenizerLike, model_output: str, request: ChatCompletionRequest | None = None, ) -> Generator[DeltaMessage, None, None]: all_token_ids = seed_oss_tokenizer.encode(model_output, add_special_tokens=False) previous_text = "" previous_tokens = None prefix_offset = 0 read_offset = 0 for i, delta_token in enumerate(all_token_ids): delta_token_ids = [delta_token] previous_token_ids = all_token_ids[:i] current_token_ids = all_token_ids[: i + 1] (new_tokens, delta_text, new_prefix_offset, new_read_offset) = ( detokenize_incrementally( tokenizer=seed_oss_tokenizer, all_input_ids=current_token_ids, prev_tokens=previous_tokens, prefix_offset=prefix_offset, read_offset=read_offset, skip_special_tokens=False, spaces_between_special_tokens=True, ) ) current_text = previous_text + delta_text delta_message = seed_oss_tool_parser.extract_tool_calls_streaming( previous_text, current_text, delta_text, previous_token_ids, current_token_ids, delta_token_ids, request=request, ) if delta_message: yield delta_message previous_text = current_text previous_tokens = ( previous_tokens + new_tokens if previous_tokens else new_tokens ) prefix_offset = new_prefix_offset read_offset = new_read_offset @pytest.mark.parametrize( ids=[ "tool_call_0_thinking_budget", "tool_call_512_thinkg_budget", "tool_call_unlimited_thinking_budget", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """<seed:think>\n</seed:cot_budget_reflect>\n</seed:cot_budget_reflect>\n""" """The current thinking budget is 0, so I will directly start answering the question.\n</seed:think>\n""" """<seed:tool_call>\n<function=get_weather>\n""" """<parameter=location>Barcelona, Spain</parameter>\n</function>\n</seed:tool_call>""", [ ToolCall( function=FunctionCall( name="get_weather", arguments=json.dumps( { "location": "Barcelona, Spain", }, ), ), type="function", ) ], 
"""<seed:think>\n</seed:cot_budget_reflect>\n</seed:cot_budget_reflect>\n""" """The current thinking budget is 0, so I will directly start answering the question.\n</seed:think>\n""", ), ( """<seed:think>The user\'s current thinking budget is 512.</seed:cot_budget_reflect>\nLet me analyze the """ """question. The user wants to know the weather in Barcelona, Spain. Looking at the functions available, """ """there\'s a get_weather function that can retrieve the current temperature for a given location. \n\nFirst, """ """check the parameters required by get_weather: location is mandatory (needs city and country), and unit is """ """optional. The user provided "Barcelona Spain" as the location, which fits the required format (city, """ """country). \n<seed:cot_budget_reflect>I have used 131 tokens, and there are 381 tokens remaining for use.""" """</seed:cot_budget_reflect>\n Since the unit isn\'t specified, the function will default to Celsius, which """ """is fine. \n\nThere\'s no need to ask for more information because the location is clear. So I should call """ """the get_weather function with location set to "Barcelona, Spain" (adding a comma for clarity, though the """ """user\'s input has a space, but the function might accept either; to be safe, using the standard format """ """with a comma).\n<seed:cot_budget_reflect>I have used 257 tokens, and there are 255 tokens remaining for """ """use.</seed:cot_budget_reflect>\n The unit parameter can be omitted since it\'s optional.</seed:think>\n""" """<seed:tool_call>\n<function=get_weather>\n<parameter=location>Barcelona, Spain</parameter>\n</function>""" """\n</seed:tool_call>""", [ ToolCall( function=FunctionCall( name="get_weather", arguments=json.dumps( { "location": "Barcelona, Spain", }, ), ), type="function", ) ], """<seed:think>The user\'s current thinking budget is 512.</seed:cot_budget_reflect>\nLet me analyze the """ """question. The user wants to know the weather in Barcelona, Spain. 
Looking at the functions available, """ """there\'s a get_weather function that can retrieve the current temperature for a given location. \n\nFirst, """ """check the parameters required by get_weather: location is mandatory (needs city and country), and unit is """ """optional. The user provided "Barcelona Spain" as the location, which fits the required format (city, """ """country). \n<seed:cot_budget_reflect>I have used 131 tokens, and there are 381 tokens remaining for use.""" """</seed:cot_budget_reflect>\n Since the unit isn\'t specified, the function will default to Celsius, which """ """is fine. \n\nThere\'s no need to ask for more information because the location is clear. So I should call """ """the get_weather function with location set to "Barcelona, Spain" (adding a comma for clarity, though the """ """user\'s input has a space, but the function might accept either; to be safe, using the standard format """ """with a comma).\n<seed:cot_budget_reflect>I have used 257 tokens, and there are 255 tokens remaining for """ """use.</seed:cot_budget_reflect>\n The unit parameter can be omitted since it\'s optional.</seed:think>\n""", ), ( """<seed:think>\nGot it, let\'s see. The user asked for the weather in Barcelona, Spain. """ """First, I need to remember the function I can use: get_weather. The function requires a """ """location (city and country) which is "Barcelona, Spain" here, and unit is optional. Since """ """the user didn\'t specify the unit, the default in the function is Celsius, right? Wait, """ """let me check the function docstring again. Oh, the function says unit is optional, and """ """returns temperature in Celsius. So I should call get_weather with location "Barcelona, """ """Spain" and maybe omit unit or set to Celsius. Let me format the function call correctly. 
""" """The format is <seed:tool_call>\n<function=get_weather>\n<parameter=location>Barcelona, """ """Spain</parameter>\n<parameter=unit>celsius</parameter>\n</function>\n</seed:tool_call>. """ """Wait, but does the unit parameter accept "celsius"? The docstring says unit is the unit """ """of temperature, but the return is in Celsius anyway. Maybe even if I don\'t pass unit, """ """it\'s okay, but to be explicit, maybe pass "celsius". Let me go with that. So the function """ """call should be as above. Then wait for the result to come back and tell the user the """ """temperature in Celsius.</seed:think><seed:tool_call>\n<function=get_weather>\n<parameter=location>""" """Barcelona, Spain</parameter>\n<parameter=unit>celsius</parameter>\n</function>\n</seed:tool_call>""", [ ToolCall( function=FunctionCall( name="get_weather", arguments=json.dumps( { "location": "Barcelona, Spain", "unit": "celsius", }, ), ), type="function", ) ], """<seed:think>\nGot it, let\'s see. The user asked for the weather in Barcelona, Spain. """ """First, I need to remember the function I can use: get_weather. The function requires a """ """location (city and country) which is "Barcelona, Spain" here, and unit is optional. Since """ """the user didn\'t specify the unit, the default in the function is Celsius, right? Wait, """ """let me check the function docstring again. Oh, the function says unit is optional, and """ """returns temperature in Celsius. So I should call get_weather with location "Barcelona, """ """Spain" and maybe omit unit or set to Celsius. Let me format the function call correctly. """ """The format is <seed:tool_call>\n<function=get_weather>\n<parameter=location>Barcelona, """ """Spain</parameter>\n<parameter=unit>celsius</parameter>\n</function>\n</seed:tool_call>. """ """Wait, but does the unit parameter accept "celsius"? The docstring says unit is the unit """ """of temperature, but the return is in Celsius anyway. 
Maybe even if I don\'t pass unit, """ """it\'s okay, but to be explicit, maybe pass "celsius". Let me go with that. So the function """ """call should be as above. Then wait for the result to come back and tell the user the """ """temperature in Celsius.</seed:think>""", ), ], ) def test_streaming_tool_calls( seed_oss_tool_parser, seed_oss_tokenizer, sample_tools, model_output, expected_tool_calls, expected_content, ): """Test incremental streaming behavior""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) other_content = "" tool_states = {} # Track state per tool index for delta_message in stream_delta_message_generator( seed_oss_tool_parser, seed_oss_tokenizer, model_output, request ): # role should never be streamed from tool parser assert not delta_message.role if delta_message.content: other_content += delta_message.content if delta_message.tool_calls: for tool_call in delta_message.tool_calls: idx = tool_call.index # Initialize state for new tool if idx not in tool_states: tool_states[idx] = { "id": None, "name": None, "arguments": "", "type": None, } # First chunk should have id, name, and type if tool_call.id: tool_states[idx]["id"] = tool_call.id if tool_call.type: assert tool_call.type == "function" tool_states[idx]["type"] = tool_call.type if tool_call.function: if tool_call.function.name: # Should only be set once assert tool_states[idx]["name"] is None tool_states[idx]["name"] = tool_call.function.name if tool_call.function.arguments is not None: # Accumulate arguments incrementally tool_states[idx]["arguments"] += tool_call.function.arguments # Verify final content assert other_content == expected_content # Verify we got all expected tool calls assert len(tool_states) == len(expected_tool_calls) # Verify each tool call for idx, expected_tool in enumerate(expected_tool_calls): state = tool_states[idx] assert state["id"] is not None assert state["type"] == "function" assert state["name"] == expected_tool.function.name # 
Parse accumulated arguments arguments_str = state["arguments"] assert arguments_str is not None actual_args = json.loads(arguments_str) expected_args = json.loads(expected_tool.function.arguments) assert actual_args == expected_args
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_ernie45_moe_tool_parser.py
tests/tool_parsers/test_ernie45_moe_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 import json from collections.abc import Generator import pytest from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, DeltaMessage, FunctionCall, ToolCall, ) from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally from vllm.tool_parsers.ernie45_tool_parser import Ernie45ToolParser # Use a common model that is likely to be available MODEL = "baidu/ERNIE-4.5-21B-A3B-Thinking" @pytest.fixture(scope="module") def ernie45_tokenizer(): return get_tokenizer(tokenizer_name=MODEL, trust_remote_code=True) @pytest.fixture def ernie45_tool_parser(ernie45_tokenizer): return Ernie45ToolParser(ernie45_tokenizer) def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert isinstance(actual_tool_call.id, str) assert len(actual_tool_call.id) > 0 assert actual_tool_call.type == "function" assert actual_tool_call.function.name == expected_tool_call.function.name # Compare arguments as JSON objects to handle formatting differences actual_args = json.loads(actual_tool_call.function.arguments) expected_args = json.loads(expected_tool_call.function.arguments) assert actual_args == expected_args def test_extract_tool_calls_no_tools(ernie45_tool_parser): model_output = "This is a test" extracted_tool_calls = ernie45_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "single_tool_call", "multiple_tool_calls", "tool_call_with_content_before", ], argnames=["model_output", 
"expected_tool_calls", "expected_content"], argvalues=[ ( """<tool_call> {"name": "get_current_temperature", "arguments": {"location": "Beijing"}} </tool_call> """, [ ToolCall( function=FunctionCall( name="get_current_temperature", arguments=json.dumps( { "location": "Beijing", } ), ) ) ], None, ), ( """<tool_call> {"name": "get_current_temperature", "arguments": {"location": "Beijing"}} </tool_call> <tool_call> {"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}} </tool_call> """, [ ToolCall( function=FunctionCall( name="get_current_temperature", arguments=json.dumps( { "location": "Beijing", } ), ) ), ToolCall( function=FunctionCall( name="get_temperature_unit", arguments=json.dumps( { "location": "Guangzhou", "unit": "c", } ), ) ), ], None, ), ( """I need to call two tools to handle these two issues separately. </think> <tool_call> {"name": "get_current_temperature", "arguments": {"location": "Beijing"}} </tool_call> <tool_call> {"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}} </tool_call> """, [ ToolCall( function=FunctionCall( name="get_current_temperature", arguments=json.dumps( { "location": "Beijing", } ), ) ), ToolCall( function=FunctionCall( name="get_temperature_unit", arguments=json.dumps( { "location": "Guangzhou", "unit": "c", } ), ) ), ], "I need to call two tools to handle these two issues separately.\n</think>", ), ], ) def test_extract_tool_calls( ernie45_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = ernie45_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content def stream_delta_message_generator( ernie45_tool_parser: Ernie45ToolParser, ernie45_tokenizer: TokenizerLike, model_output: str, request: ChatCompletionRequest | None = None, ) -> 
Generator[DeltaMessage, None, None]: all_token_ids = ernie45_tokenizer.encode(model_output, add_special_tokens=False) previous_text = "" previous_tokens = None prefix_offset = 0 read_offset = 0 for i, delta_token in enumerate(all_token_ids): delta_token_ids = [delta_token] previous_token_ids = all_token_ids[:i] current_token_ids = all_token_ids[: i + 1] (new_tokens, delta_text, new_prefix_offset, new_read_offset) = ( detokenize_incrementally( tokenizer=ernie45_tokenizer, all_input_ids=current_token_ids, prev_tokens=previous_tokens, prefix_offset=prefix_offset, read_offset=read_offset, skip_special_tokens=False, spaces_between_special_tokens=True, ) ) current_text = previous_text + delta_text delta_message = ernie45_tool_parser.extract_tool_calls_streaming( previous_text, current_text, delta_text, previous_token_ids, current_token_ids, delta_token_ids, request=request, ) if delta_message: yield delta_message previous_text = current_text previous_tokens = ( previous_tokens + new_tokens if previous_tokens else new_tokens ) prefix_offset = new_prefix_offset read_offset = new_read_offset @pytest.mark.parametrize( ids=[ "single_tool_call", "multiple_tool_calls", "tool_call_with_content_before", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """<tool_call> {"name": "get_current_temperature", "arguments": {"location": "Beijing"}} </tool_call> """, [ ToolCall( function=FunctionCall( name="get_current_temperature", arguments=json.dumps( { "location": "Beijing", } ), ) ) ], None, ), ( """<tool_call> {"name": "get_current_temperature", "arguments": {"location": "Beijing"}} </tool_call> <tool_call> {"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}} </tool_call> """, [ ToolCall( function=FunctionCall( name="get_current_temperature", arguments=json.dumps( { "location": "Beijing", } ), ) ), ToolCall( function=FunctionCall( name="get_temperature_unit", arguments=json.dumps( { "location": "Guangzhou", "unit": 
"c", } ), ) ), ], None, ), ( """I need to call two tools to handle these two issues separately. </think> <tool_call> {"name": "get_current_temperature", "arguments": {"location": "Beijing"}} </tool_call> <tool_call> {"name": "get_temperature_unit", "arguments": {"location": "Guangzhou", "unit": "c"}} </tool_call> """, [ ToolCall( function=FunctionCall( name="get_current_temperature", arguments=json.dumps( { "location": "Beijing", } ), ) ), ToolCall( function=FunctionCall( name="get_temperature_unit", arguments=json.dumps( { "location": "Guangzhou", "unit": "c", } ), ) ), ], "I need to call two tools to handle these two issues separately.\n</think>", ), ], ) def test_extract_tool_calls_streaming_incremental( ernie45_tool_parser, ernie45_tokenizer, model_output, expected_tool_calls, expected_content, ): """Verify the Ernie45 Parser streaming behavior by verifying each chunk is as expected.""" # noqa: E501 request = ChatCompletionRequest(model=MODEL, messages=[], tools=[]) tool_calls_dict = {} for delta_message in stream_delta_message_generator( ernie45_tool_parser, ernie45_tokenizer, model_output, request ): if ( delta_message.role is None and delta_message.content is None and delta_message.reasoning is None and len(delta_message.tool_calls) == 0 ): continue tool_calls = delta_message.tool_calls for tool_call_chunk in tool_calls: index = tool_call_chunk.index if index not in tool_calls_dict: if tool_call_chunk.function.arguments is None: tool_call_chunk.function.arguments = "" tool_calls_dict[index] = tool_call_chunk else: tool_calls_dict[ index ].function.arguments += tool_call_chunk.function.arguments actual_tool_calls = list(tool_calls_dict.values()) assert len(actual_tool_calls) > 0 # check tool call format assert_tool_calls(actual_tool_calls, expected_tool_calls)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_functiongemma_tool_parser.py
tests/tool_parsers/test_functiongemma_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from unittest.mock import MagicMock import pytest from vllm.entrypoints.openai.protocol import ChatCompletionRequest from vllm.tool_parsers.functiongemma_tool_parser import FunctionGemmaToolParser @pytest.fixture def mock_tokenizer(): tokenizer = MagicMock() tokenizer.encode.return_value = [1, 2, 3] tokenizer.get_vocab.return_value = {} return tokenizer @pytest.fixture def parser(mock_tokenizer): return FunctionGemmaToolParser(mock_tokenizer) @pytest.fixture def mock_request(): request = MagicMock(spec=ChatCompletionRequest) request.tools = [] request.tool_choice = "auto" return request class TestExtractToolCalls: def test_no_tool_calls(self, parser, mock_request): model_output = "Hello, how can I help you today?" result = parser.extract_tool_calls(model_output, mock_request) assert result.tools_called is False assert result.tool_calls == [] assert result.content == model_output def test_single_tool_call(self, parser, mock_request): model_output = ( "<start_function_call>call:get_weather{location:<escape>London<escape>}" "<end_function_call>" ) result = parser.extract_tool_calls(model_output, mock_request) assert result.tools_called is True assert len(result.tool_calls) == 1 assert result.tool_calls[0].function.name == "get_weather" assert '"location": "London"' in result.tool_calls[0].function.arguments def test_multiple_arguments(self, parser, mock_request): model_output = ( "<start_function_call>call:get_weather{" "location:<escape>San Francisco<escape>," "unit:<escape>celsius<escape>}" "<end_function_call>" ) result = parser.extract_tool_calls(model_output, mock_request) assert result.tools_called is True assert len(result.tool_calls) == 1 assert result.tool_calls[0].function.name == "get_weather" args = result.tool_calls[0].function.arguments assert "San Francisco" in args assert "celsius" in args def test_text_before_tool_call(self, parser, 
mock_request): model_output = ( "Let me check the weather for you. " "<start_function_call>call:get_weather{location:<escape>Paris<escape>}" "<end_function_call>" ) result = parser.extract_tool_calls(model_output, mock_request) assert result.tools_called is True assert result.content == "Let me check the weather for you." def test_multiple_tool_calls(self, parser, mock_request): model_output = ( "<start_function_call>call:get_weather{location:<escape>London<escape>}" "<end_function_call>" "<start_function_call>call:get_time{timezone:<escape>UTC<escape>}" "<end_function_call>" ) result = parser.extract_tool_calls(model_output, mock_request) assert result.tools_called is True assert len(result.tool_calls) == 2 assert result.tool_calls[0].function.name == "get_weather" assert result.tool_calls[1].function.name == "get_time" class TestParseArguments: def test_empty_arguments(self, parser): result = parser._parse_arguments("") assert result == {} def test_single_string_argument(self, parser): result = parser._parse_arguments("city:<escape>Tokyo<escape>") assert result == {"city": "Tokyo"} def test_multiple_arguments(self, parser): args_str = "city:<escape>Tokyo<escape>,country:<escape>Japan<escape>" result = parser._parse_arguments(args_str) assert result == {"city": "Tokyo", "country": "Japan"} def test_numeric_argument(self, parser): result = parser._parse_arguments("count:<escape>42<escape>") assert result == {"count": 42} def test_boolean_argument(self, parser): result = parser._parse_arguments("enabled:<escape>true<escape>") assert result == {"enabled": True} def test_argument_with_spaces(self, parser): result = parser._parse_arguments("message:<escape>Hello World<escape>") assert result == {"message": "Hello World"} class TestAdjustRequest: def test_skip_special_tokens_disabled(self, parser, mock_request): mock_request.tools = [{"type": "function", "function": {"name": "test"}}] mock_request.tool_choice = "auto" mock_request.skip_special_tokens = True result = 
parser.adjust_request(mock_request) assert result.skip_special_tokens is False def test_skip_special_tokens_when_tool_choice_none(self, parser, mock_request): mock_request.tools = [{"type": "function", "function": {"name": "test"}}] mock_request.tool_choice = "none" mock_request.skip_special_tokens = True result = parser.adjust_request(mock_request) assert result.skip_special_tokens is True class TestBufferDeltaText: def test_regular_text_not_buffered(self, parser): result = parser._buffer_delta_text("hello") assert result == "hello" assert parser.buffered_delta_text == "" def test_complete_tag_flushed(self, parser): parser.buffered_delta_text = "<start_function_" result = parser._buffer_delta_text("call>") assert "<start_function_call>" in result if __name__ == "__main__": pytest.main([__file__, "-v"])
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_openai_tool_parser.py
tests/tool_parsers/test_openai_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json import pytest from openai_harmony import ( Conversation, DeveloperContent, HarmonyEncodingName, Message, Role, SystemContent, load_harmony_encoding, ) from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall from vllm.tokenizers import get_tokenizer from vllm.tool_parsers.openai_tool_parser import OpenAIToolParser MODEL = "gpt2" @pytest.fixture(scope="module") def openai_tokenizer(): # The parser does not use the tokenizer, but the constructor requires it. return get_tokenizer(MODEL) @pytest.fixture def openai_tool_parser(openai_tokenizer): return OpenAIToolParser(openai_tokenizer) @pytest.fixture(scope="module") def harmony_encoding(): return load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS) def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall], ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert isinstance(actual_tool_call.id, str) assert len(actual_tool_call.id) > 16 # Default from protocol.py assert actual_tool_call.type == "function" assert actual_tool_call.function == expected_tool_call.function def test_extract_tool_calls_no_tools(openai_tool_parser, harmony_encoding): convo = Conversation.from_messages( [ Message.from_role_and_content( Role.SYSTEM, SystemContent.new(), ), Message.from_role_and_content( Role.DEVELOPER, DeveloperContent.new().with_instructions("Talk like a pirate!"), ), Message.from_role_and_content(Role.USER, "Arrr, how be you?"), Message.from_role_and_content( Role.ASSISTANT, "This is a test" ).with_channel("final"), ] ) token_ids = harmony_encoding.render_conversation_for_completion( convo, Role.ASSISTANT ) extracted_info = openai_tool_parser.extract_tool_calls( "", request=None, token_ids=token_ids, ) assert not extracted_info.tools_called assert 
extracted_info.tool_calls == [] assert extracted_info.content == "This is a test" @pytest.mark.parametrize( "tool_args", [ '{"location": "Tokyo"}', '{\n"location": "Tokyo"\n}', ], ) def test_extract_tool_calls_single_tool( openai_tool_parser, harmony_encoding, tool_args ): convo = Conversation.from_messages( [ Message.from_role_and_content(Role.USER, "What is the weather in Tokyo?"), Message.from_role_and_content( Role.ASSISTANT, 'User asks: "What is the weather in Tokyo?" We need to use get_current_weather tool.', # noqa: E501 ).with_channel("analysis"), Message.from_role_and_content(Role.ASSISTANT, tool_args) .with_channel("commentary") .with_recipient("functions.get_current_weather") .with_content_type("json"), ] ) token_ids = harmony_encoding.render_conversation_for_completion( convo, Role.ASSISTANT ) extracted_info = openai_tool_parser.extract_tool_calls( "", request=None, token_ids=token_ids, ) assert extracted_info.tools_called expected_tool_calls = [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps({"location": "Tokyo"}), ) ) ] assert_tool_calls(extracted_info.tool_calls, expected_tool_calls) assert extracted_info.content is None def test_extract_tool_calls_multiple_tools( openai_tool_parser, harmony_encoding, ): convo = Conversation.from_messages( [ Message.from_role_and_content( Role.USER, "What is the weather in Tokyo based on where I'm at?" ), Message.from_role_and_content( Role.ASSISTANT, 'User asks: "What is the weather in Tokyo?" based on their location. 
We need to use get_current_weather tool and get_user_location tool.', # noqa: E501 ).with_channel("analysis"), Message.from_role_and_content(Role.ASSISTANT, '{"location": "Tokyo"}') .with_channel("commentary") .with_recipient("functions.get_current_weather") .with_content_type("json"), Message.from_role_and_content(Role.ASSISTANT, '{"location": "Tokyo"}') .with_channel("commentary") .with_recipient("functions.get_user_location") .with_content_type("json"), Message.from_role_and_content(Role.ASSISTANT, '{"location": "Tokyo"}') .with_channel("commentary") .with_recipient("functions.no_content_type"), Message.from_role_and_content(Role.ASSISTANT, "foo") .with_channel("commentary") .with_recipient("functions.not_json_no_content_type"), Message.from_role_and_content(Role.ASSISTANT, "{}") .with_channel("commentary") .with_recipient("functions.empty_args") .with_content_type("json"), Message.from_role_and_content(Role.ASSISTANT, "") .with_channel("commentary") .with_recipient("functions.no_args") .with_content_type("json"), ] ) token_ids = harmony_encoding.render_conversation_for_completion( convo, Role.ASSISTANT, ) extracted_info = openai_tool_parser.extract_tool_calls( "", request=None, token_ids=token_ids, ) assert extracted_info.tools_called expected_tool_calls = [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps({"location": "Tokyo"}), ) ), ToolCall( function=FunctionCall( name="get_user_location", arguments=json.dumps({"location": "Tokyo"}), ) ), ToolCall( function=FunctionCall( name="no_content_type", arguments=json.dumps({"location": "Tokyo"}), ) ), ToolCall( function=FunctionCall( name="not_json_no_content_type", arguments="foo", ) ), ToolCall( function=FunctionCall( name="empty_args", arguments=json.dumps({}), ) ), ToolCall( function=FunctionCall( name="no_args", arguments="", ) ), ] assert_tool_calls(extracted_info.tool_calls, expected_tool_calls) assert extracted_info.content is None def test_extract_tool_calls_with_content( 
openai_tool_parser, harmony_encoding, ): final_content = "This tool call will get the weather." convo = Conversation.from_messages( [ Message.from_role_and_content( Role.USER, "What is the weather in Tokyo based on where I'm at?" ), Message.from_role_and_content( Role.ASSISTANT, 'User asks: "What is the weather in Tokyo?" based on their location. We need to use get_current_weather tool and get_user_location tool.', # noqa: E501 ).with_channel("analysis"), Message.from_role_and_content(Role.ASSISTANT, '{"location": "Tokyo"}') .with_channel("commentary") .with_recipient("functions.get_current_weather") .with_content_type("json"), Message.from_role_and_content(Role.ASSISTANT, final_content).with_channel( "final" ), ] ) token_ids = harmony_encoding.render_conversation_for_completion( convo, Role.ASSISTANT, ) extracted_info = openai_tool_parser.extract_tool_calls( "", request=None, token_ids=token_ids, ) assert extracted_info.tools_called expected_tool_calls = [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps({"location": "Tokyo"}), ) ), ] assert_tool_calls(extracted_info.tool_calls, expected_tool_calls) assert extracted_info.content == final_content
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/__init__.py
tests/tool_parsers/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_jamba_tool_parser.py
tests/tool_parsers/test_jamba_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json from collections.abc import Generator import partial_json_parser import pytest from partial_json_parser.core.options import Allow from vllm.entrypoints.openai.protocol import DeltaMessage, FunctionCall, ToolCall from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally from vllm.tool_parsers.jamba_tool_parser import JambaToolParser MODEL = "ai21labs/Jamba-tiny-dev" @pytest.fixture(scope="module") def jamba_tokenizer(): return get_tokenizer(tokenizer_name=MODEL) @pytest.fixture def jamba_tool_parser(jamba_tokenizer): return JambaToolParser(jamba_tokenizer) def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert isinstance(actual_tool_call.id, str) assert len(actual_tool_call.id) > 16 assert actual_tool_call.type == "function" assert actual_tool_call.function == expected_tool_call.function def stream_delta_message_generator( jamba_tool_parser: JambaToolParser, jamba_tokenizer: TokenizerLike, model_output: str, ) -> Generator[DeltaMessage, None, None]: all_token_ids = jamba_tokenizer.encode(model_output, add_special_tokens=False) previous_text = "" previous_tokens = None prefix_offset = 0 read_offset = 0 for i, delta_token in enumerate(all_token_ids): delta_token_ids = [delta_token] previous_token_ids = all_token_ids[:i] current_token_ids = all_token_ids[: i + 1] (new_tokens, delta_text, new_prefix_offset, new_read_offset) = ( detokenize_incrementally( tokenizer=jamba_tokenizer, all_input_ids=current_token_ids, prev_tokens=previous_tokens, prefix_offset=prefix_offset, read_offset=read_offset, skip_special_tokens=False, spaces_between_special_tokens=True, ) ) current_text = previous_text + 
delta_text delta_message = jamba_tool_parser.extract_tool_calls_streaming( previous_text, current_text, delta_text, previous_token_ids, current_token_ids, delta_token_ids, request=None, # type: ignore[arg-type] ) if delta_message: yield delta_message previous_text = current_text previous_tokens = ( previous_tokens + new_tokens if previous_tokens else new_tokens ) prefix_offset = new_prefix_offset read_offset = new_read_offset def test_extract_tool_calls_no_tools(jamba_tool_parser): model_output = "This is a test" extracted_tool_calls = jamba_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "single_tool", "single_tool_with_content", "parallel_tools", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """ <tool_calls>[\n {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}\n]</tool_calls>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], None, ), ( """ Sure! let me call the tool for you.<tool_calls>[\n {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}\n]</tool_calls>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], " Sure! 
let me call the tool for you.", ), ( """ <tool_calls>[\n {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}},\n {"name": "get_current_weather", "arguments": {"city": "Orlando", "state": "FL", "unit": "fahrenheit"}}\n]</tool_calls>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Orlando", "state": "FL", "unit": "fahrenheit"} ), ) ), ], None, ), ], ) def test_extract_tool_calls( jamba_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = jamba_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content @pytest.mark.parametrize( ids=[ "no_tools", "single_tool", "single_tool_with_content", "parallel_tools", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ("""This is a test""", [], """This is a test"""), ( """ <tool_calls>[\n {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}\n]</tool_calls>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], " ", ), ( """ Sure! let me call the tool for you.<tool_calls>[\n {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}\n]</tool_calls>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], " Sure! 
let me call the tool for you.", ), ( """ <tool_calls>[\n {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}},\n {"name": "get_current_weather", "arguments": {"city": "Orlando", "state": "FL", "unit": "fahrenheit"}}\n]</tool_calls>""", # noqa: E501 [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Orlando", "state": "FL", "unit": "fahrenheit"} ), ) ), ], " ", ), ], ) def test_extract_tool_calls_streaming( jamba_tool_parser, jamba_tokenizer, model_output, expected_tool_calls, expected_content, ): other_content: str = "" function_names: list[str] = [] function_args_strs: list[str] = [] tool_call_idx: int = -1 tool_call_ids: list[str | None] = [] for delta_message in stream_delta_message_generator( jamba_tool_parser, jamba_tokenizer, model_output ): # role should never be streamed from tool parser assert not delta_message.role if delta_message.content: other_content += delta_message.content streamed_tool_calls = delta_message.tool_calls if streamed_tool_calls and len(streamed_tool_calls) > 0: # make sure only one diff is present - correct even for parallel assert len(streamed_tool_calls) == 1 tool_call = streamed_tool_calls[0] # if a new tool is being called, set up empty arguments if tool_call.index != tool_call_idx: tool_call_idx = tool_call.index function_args_strs.append("") tool_call_ids.append(None) # if a tool call ID is streamed, make sure one hasn't been already if tool_call.id and not tool_call_ids[tool_call.index]: tool_call_ids[tool_call.index] = tool_call.id # if parts of the function start being streamed if tool_call.function: # if the function name is defined, set it. it should be streamed # IN ENTIRETY, exactly one time. 
if tool_call.function.name: assert isinstance(tool_call.function.name, str) function_names.append(tool_call.function.name) if tool_call.function.arguments: # make sure they're a string and then add them to the list assert isinstance(tool_call.function.arguments, str) function_args_strs[tool_call.index] += tool_call.function.arguments assert other_content == expected_content actual_tool_calls = [ ToolCall( id=tool_call_id, function=FunctionCall( name=function_name, arguments=partial_json_parser.ensure_json( function_args_str, Allow.OBJ | Allow.STR ), ), ) for tool_call_id, function_name, function_args_str in zip( tool_call_ids, function_names, function_args_strs ) ] assert_tool_calls(actual_tool_calls, expected_tool_calls)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_qwen3coder_tool_parser.py
tests/tool_parsers/test_qwen3coder_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json from collections.abc import Generator import pytest from vllm.entrypoints.openai.protocol import ( ChatCompletionRequest, ChatCompletionToolsParam, DeltaMessage, FunctionCall, ToolCall, ) from vllm.tokenizers import TokenizerLike, get_tokenizer from vllm.tokenizers.detokenizer_utils import detokenize_incrementally from vllm.tool_parsers.qwen3coder_tool_parser import ( Qwen3CoderToolParser, ) from vllm.tool_parsers.qwen3xml_tool_parser import Qwen3XMLToolParser MODEL = "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8" @pytest.fixture(scope="module") def qwen3_tokenizer(): return get_tokenizer(tokenizer_name=MODEL) @pytest.fixture def qwen3_tool_parser(qwen3_tokenizer): return Qwen3CoderToolParser(qwen3_tokenizer) @pytest.fixture def qwen3_xml_tool_parser(qwen3_tokenizer): return Qwen3XMLToolParser(qwen3_tokenizer) @pytest.fixture(params=["xml"]) def qwen3_tool_parser_parametrized(qwen3_tool_parser, qwen3_xml_tool_parser, request): """Parameterized fixture that provides both parser types for testing""" if request.param == "original": return qwen3_tool_parser else: return qwen3_xml_tool_parser @pytest.fixture def sample_tools(): return [ ChatCompletionToolsParam( type="function", function={ "name": "get_current_weather", "description": "Get the current weather", "parameters": { "type": "object", "properties": { "city": {"type": "string", "description": "The city name"}, "state": {"type": "string", "description": "The state code"}, "unit": {"type": "string", "enum": ["fahrenheit", "celsius"]}, }, "required": ["city", "state"], }, }, ), ChatCompletionToolsParam( type="function", function={ "name": "calculate_area", "description": "Calculate area of a shape", "parameters": { "type": "object", "properties": { "shape": {"type": "string"}, "dimensions": {"type": "object"}, "precision": {"type": "integer"}, }, }, }, ), ] def assert_tool_calls( actual_tool_calls: 
list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): # Qwen3 parser doesn't generate IDs during extraction assert actual_tool_call.type == "function" assert actual_tool_call.function.name == expected_tool_call.function.name assert json.loads(actual_tool_call.function.arguments) == json.loads( expected_tool_call.function.arguments ) def stream_delta_message_generator( qwen3_tool_parser, qwen3_tokenizer: TokenizerLike, model_output: str, request: ChatCompletionRequest | None = None, ) -> Generator[DeltaMessage, None, None]: all_token_ids = qwen3_tokenizer.encode(model_output, add_special_tokens=False) previous_text = "" previous_tokens = None prefix_offset = 0 read_offset = 0 for i, delta_token in enumerate(all_token_ids): delta_token_ids = [delta_token] previous_token_ids = all_token_ids[:i] current_token_ids = all_token_ids[: i + 1] (new_tokens, delta_text, new_prefix_offset, new_read_offset) = ( detokenize_incrementally( tokenizer=qwen3_tokenizer, all_input_ids=current_token_ids, prev_tokens=previous_tokens, prefix_offset=prefix_offset, read_offset=read_offset, skip_special_tokens=False, spaces_between_special_tokens=True, ) ) current_text = previous_text + delta_text delta_message = qwen3_tool_parser.extract_tool_calls_streaming( previous_text, current_text, delta_text, previous_token_ids, current_token_ids, delta_token_ids, request=request, ) if delta_message: yield delta_message previous_text = current_text previous_tokens = ( previous_tokens + new_tokens if previous_tokens else new_tokens ) prefix_offset = new_prefix_offset read_offset = new_read_offset def test_extract_tool_calls_no_tools(qwen3_tool_parser_parametrized): model_output = "This is a test response without any tool calls" extracted_tool_calls = qwen3_tool_parser_parametrized.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] 
assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "single_tool", "single_tool_with_content", "single_tool_multiline_param", "parallel_tools", "tool_with_typed_params", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """<tool_call> <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], None, ), ( """Sure! Let me check the weather for you.<tool_call> <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], "Sure! 
Let me check the weather for you.", ), ( """<tool_call> <function=calculate_area> <parameter=shape> rectangle </parameter> <parameter=dimensions> {"width": 10, "height": 20} </parameter> <parameter=precision> 2 </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="calculate_area", arguments=json.dumps( { "shape": "rectangle", "dimensions": {"width": 10, "height": 20}, "precision": 2, } ), ) ) ], None, ), ( """<tool_call> <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call> <tool_call> <function=get_current_weather> <parameter=city> Orlando </parameter> <parameter=state> FL </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Orlando", "state": "FL", "unit": "fahrenheit"} ), ) ), ], None, ), ( """Let me calculate that area for you.<tool_call> <function=calculate_area> <parameter=shape> circle </parameter> <parameter=dimensions> {"radius": 15.5} </parameter> <parameter=precision> 3 </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="calculate_area", arguments=json.dumps( { "shape": "circle", "dimensions": {"radius": 15.5}, "precision": 3, } ), ) ) ], "Let me calculate that area for you.", ), ], ) def test_extract_tool_calls( qwen3_tool_parser_parametrized, sample_tools, model_output, expected_tool_calls, expected_content, ): request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) extracted_tool_calls = qwen3_tool_parser_parametrized.extract_tool_calls( model_output, request=request ) assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert 
extracted_tool_calls.content == expected_content def test_extract_tool_calls_fallback_no_tags( qwen3_tool_parser_parametrized, sample_tools ): """Test fallback parsing when XML tags are missing""" model_output = """<function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> </function>""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) extracted_tool_calls = qwen3_tool_parser_parametrized.extract_tool_calls( model_output, request=request ) assert extracted_tool_calls.tools_called assert len(extracted_tool_calls.tool_calls) == 1 assert extracted_tool_calls.tool_calls[0].function.name == "get_current_weather" def test_extract_tool_calls_type_conversion(qwen3_tool_parser_parametrized): """Test parameter type conversion based on tool schema""" tools = [ ChatCompletionToolsParam( type="function", function={ "name": "test_types", "parameters": { "type": "object", "properties": { "int_param": {"type": "integer"}, "float_param": {"type": "float"}, "bool_param": {"type": "boolean"}, "str_param": {"type": "string"}, "obj_param": {"type": "object"}, }, }, }, ) ] model_output = """<tool_call> <function=test_types> <parameter=int_param> 42 </parameter> <parameter=float_param> 3.14 </parameter> <parameter=bool_param> true </parameter> <parameter=str_param> hello world </parameter> <parameter=obj_param> {"key": "value"} </parameter> </function> </tool_call>""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=tools) extracted_tool_calls = qwen3_tool_parser_parametrized.extract_tool_calls( model_output, request=request ) args = json.loads(extracted_tool_calls.tool_calls[0].function.arguments) assert args["int_param"] == 42 assert args["float_param"] == 3.14 assert args["bool_param"] is True assert args["str_param"] == "hello world" assert args["obj_param"] == {"key": "value"} @pytest.mark.parametrize( ids=[ "no_tools", "single_tool", "single_tool_with_content", "single_tool_multiline_param", 
"parallel_tools", "tool_with_typed_params", # Added this test case ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ("This is a test without tools", [], "This is a test without tools"), ( """<tool_call> <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], None, ), ( """Sure! Let me check the weather for you.<tool_call> <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ) ], "Sure! Let me check the weather for you.", ), ( """<tool_call> <function=calculate_area> <parameter=shape> rectangle </parameter> <parameter=dimensions> {"width": 10, "height": 20} </parameter> <parameter=precision> 2 </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="calculate_area", arguments=json.dumps( { "shape": "rectangle", "dimensions": {"width": 10, "height": 20}, "precision": 2, } ), ) ) ], None, ), ( """<tool_call> <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call> <tool_call> <function=get_current_weather> <parameter=city> Orlando </parameter> <parameter=state> FL </parameter> <parameter=unit> celsius </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( {"city": "Dallas", "state": "TX", "unit": "fahrenheit"} ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( 
{"city": "Orlando", "state": "FL", "unit": "celsius"} ), ) ), ], None, ), # Added tool_with_typed_params test case ( """Let me calculate that area for you.<tool_call> <function=calculate_area> <parameter=shape> circle </parameter> <parameter=dimensions> {"radius": 15.5} </parameter> <parameter=precision> 3 </parameter> </function> </tool_call>""", [ ToolCall( function=FunctionCall( name="calculate_area", arguments=json.dumps( { "shape": "circle", "dimensions": {"radius": 15.5}, "precision": 3, } ), ) ) ], "Let me calculate that area for you.", ), ], ) def test_extract_tool_calls_streaming( qwen3_tool_parser_parametrized, qwen3_tokenizer, sample_tools, model_output, expected_tool_calls, expected_content, ): """Test incremental streaming behavior including typed parameters""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) other_content = "" tool_states = {} # Track state per tool index for delta_message in stream_delta_message_generator( qwen3_tool_parser_parametrized, qwen3_tokenizer, model_output, request ): # role should never be streamed from tool parser assert not delta_message.role if delta_message.content: other_content += delta_message.content if delta_message.tool_calls: for tool_call in delta_message.tool_calls: idx = tool_call.index # Initialize state for new tool if idx not in tool_states: tool_states[idx] = { "id": None, "name": None, "arguments": "", "type": None, } # First chunk should have id, name, and type if tool_call.id: tool_states[idx]["id"] = tool_call.id if tool_call.type: assert tool_call.type == "function" tool_states[idx]["type"] = tool_call.type if tool_call.function: if tool_call.function.name: # Should only be set once assert tool_states[idx]["name"] is None tool_states[idx]["name"] = tool_call.function.name if tool_call.function.arguments is not None: # Accumulate arguments incrementally tool_states[idx]["arguments"] += tool_call.function.arguments # Verify final content assert other_content == 
(expected_content or "") # Handle None case # Verify we got all expected tool calls assert len(tool_states) == len(expected_tool_calls) assert len(qwen3_tool_parser_parametrized.prev_tool_call_arr) == len( expected_tool_calls ) # Verify each tool call for idx, expected_tool in enumerate(expected_tool_calls): state = tool_states[idx] assert state["id"] is not None assert state["type"] == "function" assert state["name"] == expected_tool.function.name # Parse accumulated arguments arguments_str = state["arguments"] assert arguments_str is not None actual_args = json.loads(arguments_str) expected_args = json.loads(expected_tool.function.arguments) assert actual_args == expected_args def test_extract_tool_calls_missing_closing_parameter_tag( qwen3_tool_parser_parametrized, sample_tools ): """Test handling of missing closing </parameter> tag""" # Using get_current_weather from sample_tools but with malformed XML model_output = """Let me check the weather for you: <tool_call> <function=get_current_weather> <parameter=city> Dallas <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) extracted_tool_calls = qwen3_tool_parser_parametrized.extract_tool_calls( model_output, request=request ) # The parser should handle the malformed XML gracefully assert extracted_tool_calls.tools_called assert len(extracted_tool_calls.tool_calls) == 1 # Verify the function name is correct assert extracted_tool_calls.tool_calls[0].function.name == "get_current_weather" # Verify the arguments are parsed despite the missing closing tag args = json.loads(extracted_tool_calls.tool_calls[0].function.arguments) assert "city" in args assert args["city"] == "Dallas" assert args["state"] == "TX" assert args["unit"] == "fahrenheit" # Check that content before the tool call is preserved assert "Let me check the weather for you:" in extracted_tool_calls.content def 
test_extract_tool_calls_streaming_missing_closing_tag( qwen3_tool_parser_parametrized, qwen3_tokenizer, sample_tools ): """Test streaming with missing closing </parameter> tag""" # Using get_current_weather from sample_tools but with malformed XML model_output = """Let me check the weather for you: <tool_call> <function=get_current_weather> <parameter=city> Dallas <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) other_content = "" tool_states = {} for delta_message in stream_delta_message_generator( qwen3_tool_parser_parametrized, qwen3_tokenizer, model_output, request ): if delta_message.content: other_content += delta_message.content if delta_message.tool_calls: for tool_call in delta_message.tool_calls: idx = tool_call.index if idx not in tool_states: tool_states[idx] = { "id": None, "name": None, "arguments": "", "type": None, } if tool_call.id: tool_states[idx]["id"] = tool_call.id if tool_call.type: assert tool_call.type == "function" tool_states[idx]["type"] = tool_call.type if tool_call.function: if tool_call.function.name: tool_states[idx]["name"] = tool_call.function.name if tool_call.function.arguments is not None: tool_states[idx]["arguments"] += tool_call.function.arguments # Verify content was streamed assert "Let me check the weather for you:" in other_content # Verify we got the tool call assert len(tool_states) == 1 assert len(qwen3_tool_parser_parametrized.prev_tool_call_arr) == 1 state = tool_states[0] assert state["id"] is not None assert state["type"] == "function" assert state["name"] == "get_current_weather" # Verify arguments were parsed correctly despite missing closing tag assert state["arguments"] is not None args = json.loads(state["arguments"]) assert args["city"] == "Dallas" assert args["state"] == "TX" assert args["unit"] == "fahrenheit" def test_extract_tool_calls_streaming_incremental( 
qwen3_tool_parser_parametrized, qwen3_tokenizer, sample_tools ): """Test that streaming is truly incremental""" model_output = """I'll check the weather.<tool_call> <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> </function> </tool_call>""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) chunks = [] for delta_message in stream_delta_message_generator( qwen3_tool_parser_parametrized, qwen3_tokenizer, model_output, request ): chunks.append(delta_message) # Should have multiple chunks assert len(chunks) > 3 # First chunk(s) should be content assert chunks[0].content is not None assert chunks[0].tool_calls is None or chunks[0].tool_calls == [] # Should have a chunk with tool header (id, name, type) header_found = False for chunk in chunks: if chunk.tool_calls and chunk.tool_calls[0].id: header_found = True assert chunk.tool_calls[0].function.name == "get_current_weather" assert chunk.tool_calls[0].type == "function" # Empty initially assert chunk.tool_calls[0].function.arguments == "" break assert header_found # Should have chunks with incremental arguments arg_chunks = [] for chunk in chunks: if chunk.tool_calls and chunk.tool_calls[0].function.arguments: arg_chunks.append(chunk.tool_calls[0].function.arguments) # Arguments should be streamed incrementally assert len(arg_chunks) > 1 # Concatenated arguments should form valid JSON full_args = "".join(arg_chunks) parsed_args = json.loads(full_args) assert parsed_args["city"] == "Dallas" assert parsed_args["state"] == "TX" def test_extract_tool_calls_complex_type_with_single_quote( qwen3_tool_parser_parametrized, ): """Test parameter type conversion based on tool schema""" tools = [ ChatCompletionToolsParam( type="function", function={ "name": "test_types", "parameters": { "type": "object", "properties": { "int_param": {"type": "integer"}, "float_param": {"type": "float"}, "bool_param": {"type": "boolean"}, "str_param": {"type": 
"string"}, "obj_param": {"type": "object"}, }, }, }, ) ] model_output = """<tool_call> <function=test_types> <parameter=obj_param> {'key': 'value'} </parameter> </function> </tool_call>""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=tools) extracted_tool_calls = qwen3_tool_parser_parametrized.extract_tool_calls( model_output, request=request ) args = json.loads(extracted_tool_calls.tool_calls[0].function.arguments) assert args["obj_param"] == {"key": "value"} def test_extract_tool_calls_streaming_missing_opening_tag( qwen3_tool_parser_parametrized, qwen3_tokenizer, sample_tools ): """Test streaming with missing opening <tool_call> tag This tests that the streaming parser correctly handles tool calls that start directly with <function=...> """ model_output = """I'll check the weather for you. <function=get_current_weather> <parameter=city> Dallas </parameter> <parameter=state> TX </parameter> <parameter=unit> fahrenheit </parameter> </function> </tool_call>""" request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools) other_content = "" tool_states = {} for delta_message in stream_delta_message_generator( qwen3_tool_parser_parametrized, qwen3_tokenizer, model_output, request ): if delta_message.content: other_content += delta_message.content if delta_message.tool_calls: for tool_call in delta_message.tool_calls: idx = tool_call.index if idx not in tool_states: tool_states[idx] = { "id": None, "name": None, "arguments": "", "type": None, } if tool_call.id: tool_states[idx]["id"] = tool_call.id if tool_call.type: assert tool_call.type == "function" tool_states[idx]["type"] = tool_call.type if tool_call.function: if tool_call.function.name: tool_states[idx]["name"] = tool_call.function.name if tool_call.function.arguments is not None: tool_states[idx]["arguments"] += tool_call.function.arguments # Verify content was streamed assert "I'll check the weather for you." 
in other_content # Verify we got the tool call assert len(tool_states) == 1 assert len(qwen3_tool_parser_parametrized.prev_tool_call_arr) == 1 state = tool_states[0] assert state["id"] is not None assert state["type"] == "function" assert state["name"] == "get_current_weather" # Verify arguments were parsed correctly despite missing opening tag assert state["arguments"] is not None args = json.loads(state["arguments"]) assert args["city"] == "Dallas" assert args["state"] == "TX" assert args["unit"] == "fahrenheit"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_deepseekv31_tool_parser.py
tests/tool_parsers/test_deepseekv31_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from vllm.tokenizers import get_tokenizer from vllm.tool_parsers.deepseekv31_tool_parser import ( DeepSeekV31ToolParser, ) MODEL = "deepseek-ai/DeepSeek-V3.1" @pytest.fixture(scope="module") def deepseekv31_tokenizer(): return get_tokenizer(tokenizer_name=MODEL) @pytest.fixture def parser(deepseekv31_tokenizer): return DeepSeekV31ToolParser(deepseekv31_tokenizer) def test_extract_tool_calls_with_tool(parser): model_output = ( "normal text" + "<|tool▁calls▁begin|>" + '<|tool▁call▁begin|>foo<|tool▁sep|>{"x":1}<|tool▁call▁end|>' + "<|tool▁calls▁end|>" ) result = parser.extract_tool_calls(model_output, None) assert result.tools_called assert len(result.tool_calls) == 1 assert result.tool_calls[0].function.name == "foo" assert result.tool_calls[0].function.arguments == '{"x":1}' assert result.content == "normal text" def test_extract_tool_calls_with_multiple_tools(parser): model_output = ( "some prefix text" + "<|tool▁calls▁begin|>" + '<|tool▁call▁begin|>foo<|tool▁sep|>{"x":1}<|tool▁call▁end|>' + '<|tool▁call▁begin|>bar<|tool▁sep|>{"y":2}<|tool▁call▁end|>' + "<|tool▁calls▁end|>" + " some suffix text" ) result = parser.extract_tool_calls(model_output, None) assert result.tools_called assert len(result.tool_calls) == 2 assert result.tool_calls[0].function.name == "foo" assert result.tool_calls[0].function.arguments == '{"x":1}' assert result.tool_calls[1].function.name == "bar" assert result.tool_calls[1].function.arguments == '{"y":2}' # prefix is content assert result.content == "some prefix text"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_minimax_tool_parser.py
tests/tool_parsers/test_minimax_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 import json from typing import Any import pytest from vllm.entrypoints.openai.protocol import ( ChatCompletionToolsParam, FunctionCall, ToolCall, ) from vllm.tokenizers import get_tokenizer from vllm.tool_parsers.minimax_tool_parser import MinimaxToolParser # Use a common model that is likely to be available MODEL = "MiniMaxAi/MiniMax-M1-40k" @pytest.fixture(scope="module") def minimax_tokenizer(): return get_tokenizer(tokenizer_name=MODEL) @pytest.fixture def minimax_tool_parser(minimax_tokenizer): return MinimaxToolParser(minimax_tokenizer) @pytest.fixture def sample_tools(): return [ ChatCompletionToolsParam( type="function", function={ "name": "get_current_weather", "description": "Get the current weather", "parameters": { "type": "object", "properties": { "city": {"type": "string", "description": "The city name"}, "state": {"type": "string", "description": "The state code"}, "unit": {"type": "string", "enum": ["fahrenheit", "celsius"]}, }, "required": ["city", "state"], }, }, ), ChatCompletionToolsParam( type="function", function={ "name": "calculate_area", "description": "Calculate area of a shape", "parameters": { "type": "object", "properties": { "shape": {"type": "string"}, "dimensions": {"type": "object"}, "precision": {"type": "integer"}, }, }, }, ), ] def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert isinstance(actual_tool_call.id, str) assert len(actual_tool_call.id) > 16 assert actual_tool_call.type == "function" assert actual_tool_call.function == expected_tool_call.function def test_extract_tool_calls_no_tools(minimax_tool_parser): model_output = "This is a test" extracted_tool_calls = minimax_tool_parser.extract_tool_calls( 
model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "single_tool_call", "multiple_tool_calls", "tool_call_with_content_before", "tool_call_with_single_line_json", "tool_call_incomplete_tag", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """<tool_calls> {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}} </tool_calls>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ) ], None, ), ( """<tool_calls> {"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}} {"name": "get_current_weather", "arguments": {"city": "Orlando", "state": "FL", "unit": "fahrenheit"}} </tool_calls>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Dallas", "state": "TX", "unit": "fahrenheit", } ), ) ), ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Orlando", "state": "FL", "unit": "fahrenheit", } ), ) ), ], None, ), ( """I'll help you check the weather. 
<tool_calls> {"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA", "unit": "celsius"}} </tool_calls>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Seattle", "state": "WA", "unit": "celsius", } ), ) ) ], "I'll help you check the weather.", ), ( """<tool_calls> {"name": "get_current_weather", "arguments": {"city": "New York", "state": "NY", "unit": "celsius"}} </tool_calls>""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "New York", "state": "NY", "unit": "celsius", } ), ) ) ], None, ), ( """<tool_calls> {"name": "get_current_weather", "arguments": {"city": "Boston", "state": "MA"}}""", [ ToolCall( function=FunctionCall( name="get_current_weather", arguments=json.dumps( { "city": "Boston", "state": "MA", } ), ) ) ], None, ), ], ) def test_extract_tool_calls( minimax_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = minimax_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content def test_preprocess_model_output_with_thinking_tags(minimax_tool_parser): """Test that tool calls within thinking tags are removed during preprocessing.""" model_output = """<think>Let me think about this. <tool_calls> {"name": "fake_tool", "arguments": {"param": "value"}} </tool_calls> This should be removed.</think> I'll help you with that. 
<tool_calls> {"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA"}} </tool_calls>""" processed_output = minimax_tool_parser.preprocess_model_output(model_output) # The tool call within thinking tags should be removed assert "fake_tool" not in processed_output # But the thinking tag itself should remain assert "<think>" in processed_output assert "</think>" in processed_output # The actual tool call outside thinking tags should remain assert "get_current_weather" in processed_output def test_extract_tool_calls_with_thinking_tags(minimax_tool_parser): """Test tool extraction when thinking tags contain tool calls that should be ignored.""" model_output = """<think>I should use a tool. <tool_calls> {"name": "ignored_tool", "arguments": {"should": "ignore"}} </tool_calls></think> Let me help you with the weather. <tool_calls> {"name": "get_current_weather", "arguments": {"city": "Miami", "state": "FL", "unit": "fahrenheit"}} </tool_calls>""" extracted_tool_calls = minimax_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert len(extracted_tool_calls.tool_calls) == 1 assert extracted_tool_calls.tool_calls[0].function.name == "get_current_weather" # Content extraction is based on the position of the first <tool_calls> in the original model_output # Since preprocessing removes tool calls within thinking tags, the actual first <tool_calls> is the external one expected_content = """<think>I should use a tool. 
<tool_calls> {"name": "ignored_tool", "arguments": {"should": "ignore"}} </tool_calls></think> Let me help you with the weather.""" assert extracted_tool_calls.content == expected_content def test_extract_tool_calls_invalid_json(minimax_tool_parser): """Test that invalid JSON in tool calls is handled gracefully.""" model_output = """<tool_calls> {"name": "valid_tool", "arguments": {"city": "Seattle"}} {invalid json here} {"name": "another_valid_tool", "arguments": {"param": "value"}} </tool_calls>""" extracted_tool_calls = minimax_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called # Should extract only the valid JSON tool calls assert len(extracted_tool_calls.tool_calls) == 2 assert extracted_tool_calls.tool_calls[0].function.name == "valid_tool" assert extracted_tool_calls.tool_calls[1].function.name == "another_valid_tool" def test_extract_tool_calls_missing_name_or_arguments(minimax_tool_parser): """Test that tool calls missing name or arguments are filtered out.""" model_output = """<tool_calls> {"name": "valid_tool", "arguments": {"city": "Seattle"}} {"name": "missing_args"} {"arguments": {"city": "Portland"}} {"name": "another_valid_tool", "arguments": {"param": "value"}} </tool_calls>""" extracted_tool_calls = minimax_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called # Should extract only the valid tool calls with both name and arguments assert len(extracted_tool_calls.tool_calls) == 2 assert extracted_tool_calls.tool_calls[0].function.name == "valid_tool" assert extracted_tool_calls.tool_calls[1].function.name == "another_valid_tool" def test_streaming_basic_functionality(minimax_tool_parser): """Test basic streaming functionality.""" # Reset streaming state minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 
minimax_tool_parser.streamed_args_for_tool = [] # Test with a simple tool call current_text = """<tool_calls> {"name": "get_current_weather", "arguments": {"city": "Seattle"}} </tool_calls>""" # First call should handle the initial setup result = minimax_tool_parser.extract_tool_calls_streaming( previous_text="", current_text=current_text, delta_text="</tool_calls>", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # The result might be None or contain tool call information # This depends on the internal state management if result is not None and hasattr(result, "tool_calls") and result.tool_calls: assert len(result.tool_calls) >= 0 def test_streaming_with_content_before_tool_calls(minimax_tool_parser): """Test streaming when there's content before tool calls.""" # Reset streaming state minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 minimax_tool_parser.streamed_args_for_tool = [] current_text = "I'll help you with that. <tool_calls>" # When there's content before tool calls, it should be returned as content result = minimax_tool_parser.extract_tool_calls_streaming( previous_text="I'll help you", current_text=current_text, delta_text=" with that. <tool_calls>", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) if result is not None and hasattr(result, "content"): # Should contain some content assert result.content is not None def test_streaming_no_tool_calls(minimax_tool_parser): """Test streaming when there are no tool calls.""" current_text = "This is just regular text without any tool calls." 
result = minimax_tool_parser.extract_tool_calls_streaming( previous_text="This is just regular text", current_text=current_text, delta_text=" without any tool calls.", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # Should return the delta text as content assert result is not None assert hasattr(result, "content") assert result.content == " without any tool calls." def test_streaming_with_thinking_tags(minimax_tool_parser): """Test streaming with thinking tags that contain tool calls.""" # Reset streaming state minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 minimax_tool_parser.streamed_args_for_tool = [] current_text = """<think><tool_calls>{"name": "ignored", "arguments": {}}</tool_calls></think><tool_calls>{"name": "real_tool", "arguments": {"param": "value"}}</tool_calls>""" result = minimax_tool_parser.extract_tool_calls_streaming( previous_text="", current_text=current_text, delta_text=current_text, previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # The preprocessing should remove tool calls from thinking tags # and only process the real tool call if result is not None and hasattr(result, "tool_calls") and result.tool_calls: for tool_call in result.tool_calls: assert tool_call.function.name != "ignored" def test_extract_tool_calls_multiline_json_not_supported(minimax_tool_parser): """Test that multiline JSON in tool calls is not currently supported.""" model_output = """<tool_calls> { "name": "get_current_weather", "arguments": { "city": "New York", "state": "NY", "unit": "celsius" } } </tool_calls>""" extracted_tool_calls = minimax_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] # Multiline JSON is currently not supported, should return no tools called assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert 
extracted_tool_calls.content is None def test_streaming_arguments_incremental_output(minimax_tool_parser): """Test that streaming arguments are returned incrementally, not cumulatively.""" # Reset streaming state minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 minimax_tool_parser.streamed_args_for_tool = [] # Simulate progressive tool call building stages = [ # Stage 1: Function name complete '<tool_calls>\n{"name": "get_current_weather", "arguments": ', # Stage 2: Arguments object starts with first key '<tool_calls>\n{"name": "get_current_weather", "arguments": {"city": ', # Stage 3: First parameter value added '<tool_calls>\n{"name": "get_current_weather", "arguments": {"city": "Seattle"', # Stage 4: Second parameter added '<tool_calls>\n{"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA"', # Stage 5: Third parameter added, arguments complete '<tool_calls>\n{"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA", "unit": "celsius"}}', # Stage 6: Tool calls closed '<tool_calls>\n{"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA", "unit": "celsius"}}\n</tool', '<tool_calls>\n{"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA", "unit": "celsius"}}\n</tool_calls>', ] function_name_sent = False previous_args_content = "" for i, current_text in enumerate(stages): previous_text = stages[i - 1] if i > 0 else "" delta_text = current_text[len(previous_text) :] if i > 0 else current_text result = minimax_tool_parser.extract_tool_calls_streaming( previous_text=previous_text, current_text=current_text, delta_text=delta_text, previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) print(f"Stage {i}: Current text: {repr(current_text)}") print(f"Stage {i}: Delta text: {repr(delta_text)}") if result is not None and hasattr(result, "tool_calls") and 
result.tool_calls: tool_call = result.tool_calls[0] # Check if function name is sent (should happen only once) if tool_call.function and tool_call.function.name: assert tool_call.function.name == "get_current_weather" function_name_sent = True print(f"Stage {i}: Function name sent: {tool_call.function.name}") # Check if arguments are sent incrementally if tool_call.function and tool_call.function.arguments: args_fragment = tool_call.function.arguments print(f"Stage {i}: Got arguments fragment: {repr(args_fragment)}") # For incremental output, each fragment should be new content only # The fragment should not contain all previous content if i >= 2 and previous_args_content: # After we start getting arguments # The new fragment should not be identical to or contain all previous content assert args_fragment != previous_args_content, ( f"Fragment should be incremental, not cumulative: {args_fragment}" ) # If this is truly incremental, the fragment should be relatively small # compared to the complete arguments so far if len(args_fragment) > len(previous_args_content): print( "Warning: Fragment seems cumulative rather than incremental" ) previous_args_content = args_fragment # Verify function name was sent at least once assert function_name_sent, "Function name should have been sent" def test_streaming_arguments_delta_only(minimax_tool_parser): """Test that each streaming call returns only the delta (new part) of arguments.""" # Reset streaming state minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 minimax_tool_parser.streamed_args_for_tool = [] # Simulate two consecutive calls with growing arguments call1_text = ( '<tool_calls>\n{"name": "test_tool", "arguments": {"param1": "value1"}}' ) call2_text = '<tool_calls>\n{"name": "test_tool", "arguments": {"param1": "value1", "param2": "value2"}}' print(f"Call 1 text: {repr(call1_text)}") print(f"Call 2 text: {repr(call2_text)}") # First 
call - should get the function name and initial arguments result1 = minimax_tool_parser.extract_tool_calls_streaming( previous_text="", current_text=call1_text, delta_text=call1_text, previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) print(f"Result 1: {result1}") if result1 and hasattr(result1, "tool_calls") and result1.tool_calls: for i, tc in enumerate(result1.tool_calls): print(f" Tool call {i}: {tc}") # Second call - should only get the delta (new part) of arguments result2 = minimax_tool_parser.extract_tool_calls_streaming( previous_text=call1_text, current_text=call2_text, delta_text=', "param2": "value2"}', previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) print(f"Result 2: {result2}") if result2 and hasattr(result2, "tool_calls") and result2.tool_calls: for i, tc in enumerate(result2.tool_calls): print(f" Tool call {i}: {tc}") # Verify the second call only returns the delta if result2 is not None and hasattr(result2, "tool_calls") and result2.tool_calls: tool_call = result2.tool_calls[0] if tool_call.function and tool_call.function.arguments: args_delta = tool_call.function.arguments print(f"Arguments delta from second call: {repr(args_delta)}") # Should only contain the new part, not the full arguments # The delta should be something like ', "param2": "value2"}' or just '"param2": "value2"' assert ( ', "param2": "value2"}' in args_delta or '"param2": "value2"' in args_delta ), f"Expected delta containing param2, got: {args_delta}" # Should NOT contain the previous parameter data assert '"param1": "value1"' not in args_delta, ( f"Arguments delta should not contain previous data: {args_delta}" ) # The delta should be relatively short (incremental, not cumulative) expected_max_length = len(', "param2": "value2"}') + 10 # Some tolerance assert len(args_delta) <= expected_max_length, ( f"Delta seems too long (possibly cumulative): {args_delta}" ) print("✓ Delta validation passed") else: print("No 
arguments in result2 tool call") else: print("No tool calls in result2 or result2 is None") # This might be acceptable if no incremental update is needed # But let's at least verify that result1 had some content assert result1 is not None, "At least the first call should return something" def test_streaming_openai_compatibility(minimax_tool_parser): """Test that streaming behavior with buffering works correctly.""" # Reset streaming state minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 minimax_tool_parser.streamed_args_for_tool = [] # Reset buffering state minimax_tool_parser.pending_buffer = "" minimax_tool_parser.in_thinking_tag = False minimax_tool_parser.thinking_depth = 0 # Test scenario: simple buffering without complex tool call context test_cases: list[dict[str, Any]] = [ { "stage": "Token: <", "previous": "", "current": "<", "delta": "<", "expected_content": None, # Should be buffered }, { "stage": "Token: tool_calls>", "previous": "<", "current": "<tool_calls>", "delta": "tool_calls>", "expected_content": None, # Complete tag, should not output }, { "stage": "Regular content", "previous": "Hello", "current": "Hello world", "delta": " world", "expected_content": " world", # Normal content should pass through }, { "stage": "Content with end tag start", "previous": "Text", "current": "Text content</tool_", "delta": " content</tool_", "expected_content": " content", # Content part output, </tool_ buffered }, { "stage": "Complete end tag", "previous": "Text content</tool_", "current": "Text content</tool_calls>", "delta": "calls>", "expected_content": None, # Complete close tag, should not output }, ] for i, test_case in enumerate(test_cases): print(f"\n--- Stage {i}: {test_case['stage']} ---") print(f"Previous: {repr(test_case['previous'])}") print(f"Current: {repr(test_case['current'])}") print(f"Delta: {repr(test_case['delta'])}") result = 
minimax_tool_parser.extract_tool_calls_streaming( previous_text=test_case["previous"], current_text=test_case["current"], delta_text=test_case["delta"], previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) print(f"Result: {result}") # Check expected content if test_case["expected_content"] is None: assert result is None or not getattr(result, "content", None), ( f"Stage {i}: Expected no content, got {result}" ) print("✓ No content output as expected") else: assert result is not None and hasattr(result, "content"), ( f"Stage {i}: Expected content, got {result}" ) assert result.content == test_case["expected_content"], ( f"Stage {i}: Expected content {test_case['expected_content']}, got {result.content}" ) print(f"✓ Content matches: {repr(result.content)}") print("✓ Streaming test with buffering completed successfully") def test_streaming_thinking_tag_buffering(minimax_tool_parser): """Test that tool calls within thinking tags are properly handled during streaming.""" # Reset streaming state minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 minimax_tool_parser.streamed_args_for_tool = [] # Reset buffering state minimax_tool_parser.pending_buffer = "" minimax_tool_parser.in_thinking_tag = False minimax_tool_parser.thinking_depth = 0 # Test scenario: tool calls within thinking tags should be ignored test_cases: list[dict[str, Any]] = [ { "stage": "Start thinking", "previous": "", "current": "<think>I need to use a tool. <tool_calls>", "delta": "<think>I need to use a tool. <tool_calls>", "expected_content": "<think>I need to use a tool. <tool_calls>", # Should pass through as content }, { "stage": "Tool call in thinking", "previous": "<think>I need to use a tool. <tool_calls>", "current": '<think>I need to use a tool. 
<tool_calls>\n{"name": "ignored_tool", "arguments": {"param": "value"}}\n</tool_calls>', "delta": '\n{"name": "ignored_tool", "arguments": {"param": "value"}}\n</tool_calls>', "expected_content": '\n{"name": "ignored_tool", "arguments": {"param": "value"}}\n</tool_calls>', # </tool_calls> should be preserved in thinking tags }, { "stage": "Real tool call after thinking", "previous": '<think>I need to use a tool. <tool_calls>\n{"name": "ignored_tool", "arguments": {"param": "value"}}\n</tool_calls></think>', "current": '<think>I need to use a tool. <tool_calls>\n{"name": "ignored_tool", "arguments": {"param": "value"}}\n</tool_calls></think>\n<tool_calls>', "delta": "\n<tool_calls>", "expected_content": "\n", # Should output '\n' and suppress <tool_calls> }, ] for i, test_case in enumerate(test_cases): print(f"\n--- Stage {i}: {test_case['stage']} ---") print(f"Previous: {repr(test_case['previous'])}") print(f"Current: {repr(test_case['current'])}") print(f"Delta: {repr(test_case['delta'])}") result = minimax_tool_parser.extract_tool_calls_streaming( previous_text=test_case["previous"], current_text=test_case["current"], delta_text=test_case["delta"], previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) print(f"Result: {result}") # Check expected content if "expected_content" in test_case: if test_case["expected_content"] is None: assert result is None or not getattr(result, "content", None), ( f"Stage {i}: Expected no content, got {result}" ) else: assert result is not None and hasattr(result, "content"), ( f"Stage {i}: Expected content, got {result}" ) assert result.content == test_case["expected_content"], ( f"Stage {i}: Expected content {test_case['expected_content']}, got {result.content}" ) print(f"✓ Content matches: {repr(result.content)}") # Check tool calls if test_case.get("expected_tool_call"): assert ( result is not None and hasattr(result, "tool_calls") and result.tool_calls ), f"Stage {i}: Expected tool call, got {result}" 
tool_call = result.tool_calls[0] assert tool_call.function.name == "real_tool", ( f"Expected real_tool, got {tool_call.function.name}" ) print(f"✓ Real tool call detected: {tool_call.function.name}") print("✓ Thinking tag buffering test completed successfully") def reset_streaming_state(minimax_tool_parser): """Helper function to properly reset the streaming state for MinimaxToolParser.""" # Reset minimax-specific state minimax_tool_parser._reset_streaming_state() # Reset base class state (these should still be reset for compatibility) minimax_tool_parser.prev_tool_call_arr = [] minimax_tool_parser.current_tool_id = -1 minimax_tool_parser.current_tool_name_sent = False minimax_tool_parser.streamed_args_for_tool = [] def test_streaming_complex_scenario_with_multiple_tools(minimax_tool_parser): """Test complex streaming scenario: tools inside <think> tags and multiple tool calls in one group.""" # Reset streaming state reset_streaming_state(minimax_tool_parser) # Complex scenario: tools inside thinking tags and multiple tools in one group test_stages: list[dict[str, Any]] = [ { "stage": "Initial content", "previous": "", "current": "Let me help you with this task.", "delta": "Let me help you with this task.", "expected_content": "Let me help you with this task.", "expected_tool_calls": 0, }, { "stage": "Start thinking tag", "previous": "Let me help you with this task.", "current": "Let me help you with this task.<think>I need to analyze this situation first.", "delta": "<think>I need to analyze this situation first.", "expected_content": "<think>I need to analyze this situation first.", "expected_tool_calls": 0, }, { "stage": "Tool call inside thinking tag starts", "previous": "Let me help you with this task.<think>I need to analyze this situation first.", "current": "Let me help you with this task.<think>I need to analyze this situation first.<tool_calls>", "delta": "<tool_calls>", "expected_content": "<tool_calls>", # Inside thinking tags, tool tags should be 
preserved as content "expected_tool_calls": 0, }, {
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/tool_parsers/test_kimi_k2_tool_parser.py
tests/tool_parsers/test_kimi_k2_tool_parser.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 import json import pytest from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall from vllm.tokenizers import get_tokenizer from vllm.tool_parsers.kimi_k2_tool_parser import KimiK2ToolParser # Use a common model that is likely to be available MODEL = "moonshotai/Kimi-K2-Instruct" @pytest.fixture(scope="module") def kimi_k2_tokenizer(): return get_tokenizer(tokenizer_name=MODEL, trust_remote_code=True) @pytest.fixture def kimi_k2_tool_parser(kimi_k2_tokenizer): return KimiK2ToolParser(kimi_k2_tokenizer) def assert_tool_calls( actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall] ): assert len(actual_tool_calls) == len(expected_tool_calls) for actual_tool_call, expected_tool_call in zip( actual_tool_calls, expected_tool_calls ): assert actual_tool_call.type == "function" assert actual_tool_call.function == expected_tool_call.function # assert tool call id format: should contain function name and numeric index # Format can be either "functions.func_name:0" or "func_name:0" assert actual_tool_call.id.split(":")[-1].isdigit() assert ( actual_tool_call.id.split(":")[0].split(".")[-1] == expected_tool_call.function.name ) def run_streaming_sequence(parser, deltas): """Helper to simulate a streaming sequence and return results.""" previous_text = "" previous_token_ids: list[int] = [] results = [] for delta_text, delta_token_ids in deltas: current_text = previous_text + delta_text current_token_ids = previous_token_ids + delta_token_ids result = parser.extract_tool_calls_streaming( previous_text=previous_text, current_text=current_text, delta_text=delta_text, previous_token_ids=previous_token_ids, current_token_ids=current_token_ids, delta_token_ids=delta_token_ids, request=None, ) results.append(result) previous_text = current_text previous_token_ids = current_token_ids return results def 
test_extract_tool_calls_no_tools(kimi_k2_tool_parser): model_output = "This is a test" extracted_tool_calls = kimi_k2_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert not extracted_tool_calls.tools_called assert extracted_tool_calls.tool_calls == [] assert extracted_tool_calls.content == model_output @pytest.mark.parametrize( ids=[ "tool_call_with_content_before", "multi_tool_call_with_content_before", "concatenated_tool_calls_bug_fix", "three_concatenated_tool_calls", "mixed_spacing_tool_calls", "angle_brackets_in_json", "newlines_in_json", ], argnames=["model_output", "expected_tool_calls", "expected_content"], argvalues=[ ( """I'll help you check the weather. <|tool_calls_section_begin|> <|tool_call_begin|> functions.get_weather:0 <|tool_call_argument_begin|> {"city": "Beijing"} <|tool_call_end|> <|tool_calls_section_end|>""", [ ToolCall( id="functions.get_weather:0", function=FunctionCall( name="get_weather", arguments=json.dumps( { "city": "Beijing", }, ), ), type="function", ) ], "I'll help you check the weather. ", ), ( """I'll help you check the weather. <|tool_calls_section_begin|> <|tool_call_begin|> functions.get_weather:0 <|tool_call_argument_begin|> {"city": "Beijing"} <|tool_call_end|> <|tool_call_begin|> functions.get_weather:1 <|tool_call_argument_begin|> {"city": "Shanghai"} <|tool_call_end|> <|tool_calls_section_end|>""", [ ToolCall( id="functions.get_weather:0", function=FunctionCall( name="get_weather", arguments=json.dumps( { "city": "Beijing", }, ), ), type="function", ), ToolCall( id="functions.get_weather:1", function=FunctionCall( name="get_weather", arguments=json.dumps( { "city": "Shanghai", }, ), ), type="function", ), ], "I'll help you check the weather. ", ), ( """I'll get the weather and news for LA today. First, let me get the weather using Los Angeles coordinates, and then get the latest news. 
<|tool_calls_section_begin|><|tool_call_begin|>functions.get_weather:0<|tool_call_argument_begin|>{"latitude": 34.0522, "longitude": -118.2437}<|tool_call_end|><|tool_call_begin|>functions.get_news:1<|tool_call_argument_begin|>{"content": "Los Angeles today"}<|tool_call_end|><|tool_calls_section_end|>""", [ ToolCall( id="functions.get_weather:0", function=FunctionCall( name="get_weather", arguments=json.dumps( {"latitude": 34.0522, "longitude": -118.2437} ), ), type="function", ), ToolCall( id="functions.get_news:1", function=FunctionCall( name="get_news", arguments=json.dumps({"content": "Los Angeles today"}), ), type="function", ), ], "I'll get the weather and news for LA today. First, let me get the weather using Los Angeles coordinates, and then get the latest news. ", ), ( """I'll help you with multiple tasks. <|tool_calls_section_begin|><|tool_call_begin|>functions.get_weather:0<|tool_call_argument_begin|>{"city": "New York"}<|tool_call_end|><|tool_call_begin|>functions.get_news:1<|tool_call_argument_begin|>{"topic": "technology"}<|tool_call_end|><|tool_call_begin|>functions.send_email:2<|tool_call_argument_begin|>{"to": "user@example.com", "subject": "Daily Update"}<|tool_call_end|><|tool_calls_section_end|>""", [ ToolCall( id="functions.get_weather:0", function=FunctionCall( name="get_weather", arguments=json.dumps({"city": "New York"}), ), type="function", ), ToolCall( id="functions.get_news:1", function=FunctionCall( name="get_news", arguments=json.dumps({"topic": "technology"}), ), type="function", ), ToolCall( id="functions.send_email:2", function=FunctionCall( name="send_email", arguments=json.dumps( {"to": "user@example.com", "subject": "Daily Update"} ), ), type="function", ), ], "I'll help you with multiple tasks. ", ), ( """Mixed spacing test. 
<|tool_calls_section_begin|> <|tool_call_begin|> functions.test:0 <|tool_call_argument_begin|> {} <|tool_call_end|><|tool_call_begin|>functions.test2:1<|tool_call_argument_begin|>{}<|tool_call_end|> <|tool_calls_section_end|>""", [ ToolCall( id="functions.test:0", function=FunctionCall( name="test", arguments=json.dumps({}), ), type="function", ), ToolCall( id="functions.test2:1", function=FunctionCall( name="test2", arguments=json.dumps({}), ), type="function", ), ], "Mixed spacing test. ", ), ( """I need to process HTML content. <|tool_calls_section_begin|><|tool_call_begin|>functions.process_html:0<|tool_call_argument_begin|>{"html": "<div>content</div>", "text": "normal text"}<|tool_call_end|><|tool_calls_section_end|>""", [ ToolCall( id="functions.process_html:0", function=FunctionCall( name="process_html", arguments=json.dumps( {"html": "<div>content</div>", "text": "normal text"} ), ), type="function", ) ], "I need to process HTML content. ", ), ( """I need to process formatted JSON. <|tool_calls_section_begin|><|tool_call_begin|>functions.process_data:0<|tool_call_argument_begin|>{ "name": "test", "value": 123, "nested": { "key": "value" } }<|tool_call_end|><|tool_calls_section_end|>""", [ ToolCall( id="functions.process_data:0", function=FunctionCall( name="process_data", arguments=json.dumps( {"name": "test", "value": 123, "nested": {"key": "value"}}, indent=2, ), ), type="function", ) ], "I need to process formatted JSON. 
", ), ], ) def test_extract_tool_calls( kimi_k2_tool_parser, model_output, expected_tool_calls, expected_content ): extracted_tool_calls = kimi_k2_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) assert extracted_tool_calls.content == expected_content def test_extract_tool_calls_invalid_json(kimi_k2_tool_parser): """we'll return every funcall result""" model_output = """I'll help you check the weather. <|tool_calls_section_begin|> <|tool_call_begin|> functions.invalid_get_weather:0 <|tool_call_argument_begin|> {"city": "Beijing" <|tool_call_end|> <|tool_call_begin|> functions.valid_get_weather:1 <|tool_call_argument_begin|> {"city": "Shanghai"} <|tool_call_end|> <|tool_calls_section_end|>""" extracted_tool_calls = kimi_k2_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called # Should extract only the valid JSON tool calls assert len(extracted_tool_calls.tool_calls) == 2 assert extracted_tool_calls.tool_calls[0].function.name == "invalid_get_weather" assert extracted_tool_calls.tool_calls[1].function.name == "valid_get_weather" def test_extract_tool_calls_invalid_funcall(kimi_k2_tool_parser): """we'll return every funcall result""" model_output = """I'll help you check the weather. 
<|tool_calls_section_begin|> <|tool_call_begin|> functions.invalid_get_weather.0 <|tool_call_argument_begin|> {"city": "Beijing"} <|tool_call_end|> <|tool_call_begin|> functions.valid_get_weather:1 <|tool_call_argument_begin|> {"city": "Shanghai"} <|tool_call_end|> <|tool_calls_section_end|>""" extracted_tool_calls = kimi_k2_tool_parser.extract_tool_calls( model_output, request=None ) # type: ignore[arg-type] assert extracted_tool_calls.tools_called # Should extract only the valid JSON tool calls assert len(extracted_tool_calls.tool_calls) == 1 assert extracted_tool_calls.tool_calls[0].function.name == "valid_get_weather" def test_streaming_basic_functionality(kimi_k2_tool_parser): """Test basic streaming functionality.""" # Reset streaming state kimi_k2_tool_parser.current_tool_name_sent = False kimi_k2_tool_parser.prev_tool_call_arr = [] kimi_k2_tool_parser.current_tool_id = -1 kimi_k2_tool_parser.streamed_args_for_tool = [] # Test with a simple tool call current_text = """ check the weather. <|tool_calls_section_begin|> <|tool_call_begin|> functions.get_weather:0 <|tool_call_argument_begin|> {"city": "Beijing"} <|tool_call_end|> <|tool_calls_section_end|>""" # First call should handle the initial setup result = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="I'll help you", current_text=current_text, delta_text="<|tool_calls_section_end|>", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # The result might be None or contain tool call information # This depends on the internal state management if result is not None and hasattr(result, "tool_calls") and result.tool_calls: assert len(result.tool_calls) >= 0 def test_streaming_no_tool_calls(kimi_k2_tool_parser): """Test streaming when there are no tool calls.""" current_text = "This is just regular text without any tool calls." 
result = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="This is just regular text", current_text=current_text, delta_text=" without any tool calls.", previous_token_ids=[], current_token_ids=[], delta_token_ids=[], request=None, ) # Should return the delta text as content assert result is not None assert hasattr(result, "content") assert result.content == " without any tool calls." def test_token_leak_between_section_and_tool_begin(kimi_k2_tool_parser): """ Test that text between <|tool_calls_section_begin|> and <|tool_call_begin|> is suppressed and does not leak into reasoning_delta. This is the main vulnerability being fixed. """ kimi_k2_tool_parser.reset_streaming_state() # Get token IDs for the markers section_begin_token_id = kimi_k2_tool_parser.vocab.get( "<|tool_calls_section_begin|>" ) tool_call_begin_token_id = kimi_k2_tool_parser.vocab.get("<|tool_call_begin|>") # Simulate streaming sequence: deltas = [ ("I'll help you with that. ", [1, 2, 3]), ("<|tool_calls_section_begin|>", [section_begin_token_id]), (" spurious text ", [4, 5]), ("<|tool_call_begin|>", [tool_call_begin_token_id]), ] results = run_streaming_sequence(kimi_k2_tool_parser, deltas) # Delta 1: "I'll help you with that. " assert results[0] is not None assert results[0].content == "I'll help you with that. " # Delta 2: "<|tool_calls_section_begin|>" # Section marker should be stripped and suppressed assert results[1] is None or ( results[1].content is None or results[1].content == "" ) # Delta 3: " spurious text or tokens " (THE LEAK SCENARIO) # CRITICAL: This text should be suppressed, NOT returned as reasoning_delta assert results[2] is None or ( results[2].content is None or results[2].content == "" ) # Delta 4: "<|tool_call_begin|>..." 
# Now we're in tool call mode, result depends on internal state # The key is that the spurious text from Delta 3 was not leaked def test_split_markers_across_deltas(kimi_k2_tool_parser): """ Test that markers split across delta chunks are correctly detected via the rolling buffer mechanism. """ kimi_k2_tool_parser.reset_streaming_state() section_begin_token_id = kimi_k2_tool_parser.vocab.get( "<|tool_calls_section_begin|>" ) # Delta 1: partial token, Delta 2: complete marker deltas = [ ("<|tool_calls_sec", [3]), ("tion_begin|> ", [section_begin_token_id, 4]), ] _results = run_streaming_sequence(kimi_k2_tool_parser, deltas) # Now the complete marker should be detected via buffer assert kimi_k2_tool_parser.in_tool_section is True def test_marker_variants(kimi_k2_tool_parser): """Test that both singular and plural marker variants are recognized.""" kimi_k2_tool_parser.reset_streaming_state() # Test singular variant: <|tool_call_section_begin|> (note: singular "call") singular_token_id = kimi_k2_tool_parser.vocab.get("<|tool_call_section_begin|>") if singular_token_id is not None: # Only test if tokenizer supports it _result = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Reasoning ", current_text="Reasoning <|tool_call_section_begin|>", delta_text="<|tool_call_section_begin|>", previous_token_ids=[1, 2], current_token_ids=[1, 2, singular_token_id], delta_token_ids=[singular_token_id], request=None, ) # Should enter tool section mode with singular variant too assert kimi_k2_tool_parser.in_tool_section is True def test_reentry_to_reasoning_after_tool_section(kimi_k2_tool_parser): """ Test that after exiting a tool section with <|tool_calls_section_end|>, subsequent text is correctly returned as reasoning content. 
""" kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") section_end_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_end|>") deltas = [ ("<|tool_calls_section_begin|>", [section_begin_id]), ("<|tool_calls_section_end|>", [section_end_id]), (" More reasoning", [10, 11]), ] results = run_streaming_sequence(kimi_k2_tool_parser, deltas) assert kimi_k2_tool_parser.in_tool_section is False assert results[2] is not None assert results[2].content == " More reasoning" def test_empty_tool_section(kimi_k2_tool_parser): """Test an empty tool section (begin immediately followed by end).""" kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") section_end_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_end|>") # Section begin _result1 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Reasoning ", current_text="Reasoning <|tool_calls_section_begin|>", delta_text="<|tool_calls_section_begin|>", previous_token_ids=[1], current_token_ids=[1, section_begin_id], delta_token_ids=[section_begin_id], request=None, ) # Immediate section end _result2 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Reasoning <|tool_calls_section_begin|>", current_text="Reasoning <|tool_calls_section_begin|><|tool_calls_section_end|>", delta_text="<|tool_calls_section_end|>", previous_token_ids=[1, section_begin_id], current_token_ids=[1, section_begin_id, section_end_id], delta_token_ids=[section_end_id], request=None, ) # Should exit cleanly without errors assert kimi_k2_tool_parser.in_tool_section is False def test_malformed_tool_section_recovery(kimi_k2_tool_parser): """ Test that the parser recovers from a malformed tool section that never closes properly. 
""" kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") # Enter tool section _result1 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="", current_text="<|tool_calls_section_begin|>", delta_text="<|tool_calls_section_begin|>", previous_token_ids=[], current_token_ids=[section_begin_id], delta_token_ids=[section_begin_id], request=None, ) assert kimi_k2_tool_parser.in_tool_section is True # Simulate a lot of text without proper tool calls or section end # This should trigger the error recovery mechanism large_text = "x" * 10000 # Exceeds max_section_chars result2 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="<|tool_calls_section_begin|>", current_text="<|tool_calls_section_begin|>" + large_text, delta_text=large_text, previous_token_ids=[section_begin_id], current_token_ids=[section_begin_id] + list(range(100, 100 + len(large_text))), delta_token_ids=list(range(100, 100 + len(large_text))), request=None, ) # Parser should have force-exited the tool section assert kimi_k2_tool_parser.in_tool_section is False # And returned the content as reasoning assert result2 is not None assert result2.content == large_text def test_state_reset(kimi_k2_tool_parser): """Test that reset_streaming_state() properly clears all state.""" # Put parser in a complex state kimi_k2_tool_parser.in_tool_section = True kimi_k2_tool_parser.token_buffer = "some buffer" kimi_k2_tool_parser.current_tool_id = 5 kimi_k2_tool_parser.prev_tool_call_arr = [{"id": "test"}] kimi_k2_tool_parser.section_char_count = 1000 # Reset kimi_k2_tool_parser.reset_streaming_state() # Verify all state is cleared assert kimi_k2_tool_parser.in_tool_section is False assert kimi_k2_tool_parser.token_buffer == "" assert kimi_k2_tool_parser.current_tool_id == -1 assert kimi_k2_tool_parser.prev_tool_call_arr == [] assert kimi_k2_tool_parser.section_char_count == 0 assert kimi_k2_tool_parser.current_tool_name_sent is 
False assert kimi_k2_tool_parser.streamed_args_for_tool == [] def test_section_begin_noise_tool_begin_same_chunk(kimi_k2_tool_parser): """ Test that begin→noise→tool_begin within the SAME chunk suppresses the noise text correctly (not just across chunks). """ kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") tool_call_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_call_begin|>") # Single delta containing: section_begin + spurious text + tool_call_begin combined_text = "<|tool_calls_section_begin|> noise text <|tool_call_begin|>" result = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Reasoning ", current_text="Reasoning " + combined_text, delta_text=combined_text, previous_token_ids=[1, 2], current_token_ids=[1, 2, section_begin_id, 3, 4, tool_call_begin_id], delta_token_ids=[section_begin_id, 3, 4, tool_call_begin_id], request=None, ) # The noise text should NOT leak into content # Result should either be None/empty or start tool call parsing if result is not None and result.content is not None: # If content is returned, it should not contain the noise assert "noise text" not in result.content assert result.content == "" or result.content.strip() == "" def test_stream_ends_without_section_end_marker(kimi_k2_tool_parser): """ Test that if the stream ends (EOF) without a proper section end marker, the parser doesn't leak text, doesn't crash, and resets state cleanly. 
""" kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") # Enter tool section _result1 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="", current_text="<|tool_calls_section_begin|>", delta_text="<|tool_calls_section_begin|>", previous_token_ids=[], current_token_ids=[section_begin_id], delta_token_ids=[section_begin_id], request=None, ) assert kimi_k2_tool_parser.in_tool_section is True # Some content in tool section result2 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="<|tool_calls_section_begin|>", current_text="<|tool_calls_section_begin|> partial content", delta_text=" partial content", previous_token_ids=[section_begin_id], current_token_ids=[section_begin_id, 10, 11], delta_token_ids=[10, 11], request=None, ) # Content should be suppressed assert result2.content == "" or result2.content is None # Stream ends (EOF) - no more deltas, no section_end marker # Simulate this by manually checking state and resetting # (In real usage, the request handler would call reset_streaming_state) assert kimi_k2_tool_parser.in_tool_section is True # Still in section # Reset state (as would happen between requests) kimi_k2_tool_parser.reset_streaming_state() # Verify clean slate assert kimi_k2_tool_parser.in_tool_section is False assert kimi_k2_tool_parser.token_buffer == "" # Next request should work normally result3 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="", current_text="New reasoning", delta_text="New reasoning", previous_token_ids=[], current_token_ids=[20, 21], delta_token_ids=[20, 21], request=None, ) assert result3 is not None assert result3.content == "New reasoning" def test_same_chunk_begin_and_end_markers(kimi_k2_tool_parser): """ CRITICAL TEST: Verify that when both section_begin and section_end markers appear in the SAME chunk, the parser correctly: 1. Enters the tool section 2. Immediately exits the tool section 3. 
Does NOT get stuck in in_tool_section=True state This tests the bug fix where elif was changed to if to handle both state transitions in a single delta. """ kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") section_end_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_end|>") # Single chunk with both markers (e.g., empty tool section) combined_delta = "<|tool_calls_section_begin|><|tool_calls_section_end|>" result = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Some reasoning ", current_text="Some reasoning " + combined_delta, delta_text=combined_delta, previous_token_ids=[1, 2], current_token_ids=[1, 2, section_begin_id, section_end_id], delta_token_ids=[section_begin_id, section_end_id], request=None, ) # CRITICAL: Parser should NOT be stuck in tool section assert kimi_k2_tool_parser.in_tool_section is False, ( "Parser stuck in tool section after processing both begin/end in same chunk. " "This indicates the elif bug was not fixed." ) # Result should be empty or contain only stripped content assert result is not None assert result.content == "" or result.content is None # Verify subsequent content streams correctly (not suppressed) result2 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Some reasoning " + combined_delta, current_text="Some reasoning " + combined_delta + " More reasoning", delta_text=" More reasoning", previous_token_ids=[1, 2, section_begin_id, section_end_id], current_token_ids=[1, 2, section_begin_id, section_end_id, 10, 11], delta_token_ids=[10, 11], request=None, ) # This content should NOT be suppressed (we're out of tool section) assert result2 is not None assert result2.content == " More reasoning" def test_same_chunk_begin_content_end_markers(kimi_k2_tool_parser): """ Test the same-chunk scenario with actual content between markers. 
Example: <|tool_calls_section_begin|> text <|tool_calls_section_end|> all arriving in one delta. The key is that the state machine correctly transitions in and out within the same chunk. """ kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") section_end_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_end|>") # Chunk with begin, some whitespace/noise, and end all together # This simulates a tool section that opens and closes in the same chunk combined_delta = "<|tool_calls_section_begin|> <|tool_calls_section_end|>" _result = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Reasoning ", current_text="Reasoning " + combined_delta, delta_text=combined_delta, previous_token_ids=[1], current_token_ids=[1, section_begin_id, 100, section_end_id], delta_token_ids=[section_begin_id, 100, section_end_id], request=None, ) # Parser should exit cleanly (not stuck in tool section) assert kimi_k2_tool_parser.in_tool_section is False # Verify the fix: next content should stream normally, not be suppressed result2 = kimi_k2_tool_parser.extract_tool_calls_streaming( previous_text="Reasoning " + combined_delta, current_text="Reasoning " + combined_delta + " Done", delta_text=" Done", previous_token_ids=[1, section_begin_id, 100, section_end_id], current_token_ids=[1, section_begin_id, 100, section_end_id, 200], delta_token_ids=[200], request=None, ) # Content after section should be returned (not suppressed) assert result2 is not None assert result2.content == " Done" def test_tool_call_end_and_section_end_same_chunk(kimi_k2_tool_parser): """ CRITICAL TEST (P1): Verify that when both <|tool_call_end|> and <|tool_calls_section_end|> appear in the SAME chunk, the parser: 1. Processes the tool_call_end first (emits final arguments) 2. THEN exits the section 3. Does NOT drop the final tool call update 4. Does NOT leak special tokens into reasoning This tests the deferred section exit fix. 
""" kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") section_end_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_end|>") tool_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_call_begin|>") tool_end_id = kimi_k2_tool_parser.vocab.get("<|tool_call_end|>") # Simulate a streaming sequence for a SHORT tool call (all in one chunk): combined = ( '<|tool_call_begin|>get_weather:0 <|tool_call_argument_begin|> {"city": "Paris"} ' "<|tool_call_end|><|tool_calls_section_end|>" ) deltas = [ ("Let me help. ", [1, 2]), ("<|tool_calls_section_begin|>", [section_begin_id]), (combined, [tool_begin_id, 10, 11, 12, tool_end_id, section_end_id]), (" Done", [20]), ] results = run_streaming_sequence(kimi_k2_tool_parser, deltas) # CRITICAL: Parser should have exited section AFTER processing tool assert kimi_k2_tool_parser.in_tool_section is False # Tool call should have been emitted (not dropped) if results[2] is not None and results[2].content is not None: # Verify no special tokens leaked into content assert "<|tool_call_end|>" not in results[2].content assert "<|tool_calls_section_end|>" not in results[2].content # Content after tool section should stream normally assert results[3] is not None assert results[3].content == " Done" def test_streaming_tool_call_markers_not_leaked(kimi_k2_tool_parser): """ CRITICAL TEST: Verify that tool call markers (<|tool_call_begin|>, <|tool_call_end|>, <|tool_call_argument_begin|>) are NOT leaked into the content field during streaming. This reproduces the AWS Bedrock bug where tool call markers appeared in the 'text' field of responses. """ kimi_k2_tool_parser.reset_streaming_state() section_begin_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_begin|>") section_end_id = kimi_k2_tool_parser.vocab.get("<|tool_calls_section_end|>")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/samplers/test_beam_search.py
tests/samplers/test_beam_search.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Compare the outputs of HF and vLLM when using beam search.

Run `pytest tests/samplers/test_beam_search.py`.
"""

import pytest
from transformers import AutoModelForSeq2SeqLM

from vllm.assets.audio import AudioAsset

# FIXME(zhuohan): The test can not pass if we:
# 1. Increase max_tokens to 256.
# 2. Increase beam_width to 8.
# 3. Use the model "huggyllama/llama-7b".
MAX_TOKENS = [64]
BEAM_WIDTHS = [4]
MM_BEAM_WIDTHS = [2]
MODELS = ["TinyLlama/TinyLlama-1.1B-Chat-v1.0"]


@pytest.mark.skip_v1  # FIXME: This fails on V1 right now.
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", MAX_TOKENS)
@pytest.mark.parametrize("beam_width", BEAM_WIDTHS)
def test_beam_search_single_input(
    hf_runner,
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
    beam_width: int,
) -> None:
    """Beam-search outputs of vLLM must match HF token-for-token."""
    example_prompts = example_prompts[:1]
    with hf_runner(model, dtype=dtype) as hf_model:
        hf_outputs = hf_model.generate_beam_search(
            example_prompts, beam_width, max_tokens
        )

    with vllm_runner(model, dtype=dtype) as vllm_model:
        vllm_outputs = vllm_model.generate_beam_search(
            example_prompts, beam_width, max_tokens
        )

    for i in range(len(example_prompts)):
        hf_output_ids, hf_output_texts = hf_outputs[i]
        vllm_output_ids, vllm_output_texts = vllm_outputs[i]
        for j, (hf_text, vllm_text) in enumerate(
            zip(hf_output_texts, vllm_output_texts)
        ):
            print(f">>>{j}-th hf output:")
            print(hf_text)
            print(f">>>{j}-th vllm output:")
            print(vllm_text)
        assert len(hf_output_ids) == len(vllm_output_ids)
        for j in range(len(hf_output_ids)):
            assert hf_output_ids[j] == vllm_output_ids[j], (
                f"Test{i} output{j}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}"
            )


@pytest.mark.skip_v1  # FIXME: This fails on V1 right now.
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", MAX_TOKENS)
@pytest.mark.parametrize("beam_width", BEAM_WIDTHS)
def test_beam_search_with_concurrency_limit(
    hf_runner,
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
    beam_width: int,
) -> None:
    """Beam search results must be identical with and without a concurrency limit."""
    # example_prompts[1]&[3]&[7] fails due to unknown reason even without
    # concurrency limit. skip them for now.
    example_prompts = example_prompts[:8]
    concurrency_limit = 2
    assert len(example_prompts) > concurrency_limit

    with vllm_runner(model, dtype=dtype) as vllm_model:
        outputs_with_limit = vllm_model.generate_beam_search(
            example_prompts, beam_width, max_tokens, concurrency_limit=concurrency_limit
        )
        # Reference run: process prompts in batches of `concurrency_limit`
        # without passing the limit to the engine.
        outputs_without_limit = []
        for i in range(0, len(example_prompts), concurrency_limit):
            outputs_without_limit.extend(
                vllm_model.generate_beam_search(
                    example_prompts[i : i + concurrency_limit], beam_width, max_tokens
                )
            )

    correct = True
    for i in range(len(example_prompts)):
        output_ids_with_limit, output_texts_with_limit = outputs_with_limit[i]
        output_ids_without_limit, output_texts_without_limit = outputs_without_limit[i]
        for j, (text_with_limit, text_without_limit) in enumerate(
            zip(output_texts_with_limit, output_texts_without_limit)
        ):
            print(f">>>{j}-th with limit output:")
            print(text_with_limit)
            print(f">>>{j}-th without limit output:")
            print(text_without_limit)
        assert len(output_ids_with_limit) == len(output_ids_without_limit)
        for j in range(len(output_ids_with_limit)):
            if output_ids_with_limit[j] != output_ids_without_limit[j]:
                print(
                    f"Test{i} output{j}:\n+limit: {output_ids_with_limit}\n"
                    f"-limit: {output_ids_without_limit}"
                )
                correct = False
    assert correct


@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", MAX_TOKENS)
@pytest.mark.parametrize("beam_width", MM_BEAM_WIDTHS)
def test_beam_search_passes_multimodal_data(
    hf_runner,
    vllm_runner,
    dtype: str,
    max_tokens: int,
    beam_width: int,
) -> None:
    """Ensure that beam search passes multimodal data through correctly."""
    # NOTE - this test is primarily to check that mm data is passed to beams
    # correctly. As such, we just need to check one extra modality to make
    # sure things pass through properly.
    audios = [AudioAsset("mary_had_lamb").audio_and_sample_rate]
    model = "Qwen/Qwen2-Audio-7B-Instruct"
    audio_seq = "<|audio_bos|><|AUDIO|><|audio_eos|>"
    prompts = [
        f"<|im_start|>user\n{audio_seq}Can you transcribe this?<|im_end|>\n<|im_start|>assistant\n"  # noqa: E501
    ]

    with hf_runner(model, dtype=dtype, auto_cls=AutoModelForSeq2SeqLM) as hf_model:
        audio_token_id = hf_model.config.audio_token_index
        eos_token_id = hf_model.tokenizer.eos_token_id  # <|im_end|>
        hf_outputs = hf_model.generate_beam_search(
            prompts,
            beam_width=beam_width,
            max_tokens=max_tokens,
            audios=audios,
        )

    with vllm_runner(model, dtype=dtype) as vllm_model:
        vllm_outputs = vllm_model.generate_beam_search(
            prompts,
            beam_width=beam_width,
            max_tokens=max_tokens,
            audios=audios,
        )

    # Use a def instead of an assigned lambda (PEP 8 / E731).
    def seq_with_no_audio_toks(seq):
        return [tok for tok in seq if tok != audio_token_id]

    for i in range(len(prompts)):
        hf_output_ids, hf_output_texts = hf_outputs[i]
        vllm_output_ids, vllm_output_texts = vllm_outputs[i]
        for j, (hf_text, vllm_text) in enumerate(
            zip(hf_output_texts, vllm_output_texts)
        ):
            print(f">>>{j}-th hf output [NOTE: special tokens are filtered]:")
            print(hf_text)
            print(f">>>{j}-th vllm output:")
            print(vllm_text)
        assert len(hf_output_ids) == len(vllm_output_ids)

        for j in range(len(hf_output_ids)):
            # Compare everything except for the audio tokens; we do this since
            # the IDs returned from the transformers helper expands the audio
            # token to match features, while the vLLM helper maintains the
            # single audio token in the input text
            filtered_hf_output_ids = seq_with_no_audio_toks(hf_output_ids[j])
            filtered_vllm_output_ids = seq_with_no_audio_toks(vllm_output_ids[j])
            # HF output IDs may contain the end of sequence
            if len(filtered_hf_output_ids) == len(filtered_vllm_output_ids) + 1:
                assert filtered_hf_output_ids[-1] == eos_token_id
                filtered_hf_output_ids = filtered_hf_output_ids[:-1]
            assert filtered_hf_output_ids == filtered_vllm_output_ids
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/samplers/test_ignore_eos.py
tests/samplers/test_ignore_eos.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Make sure ignore_eos works.

Run `pytest tests/samplers/test_ignore_eos.py`.
"""

import pytest

from vllm import SamplingParams

# We also test with llama because it has generation_config to specify EOS
# (past regression).
MODELS = ["distilbert/distilgpt2", "meta-llama/Llama-3.2-1B"]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [512])
def test_ignore_eos(
    vllm_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
) -> None:
    """With ignore_eos=True, generation must always run to max_tokens."""
    with vllm_runner(model, dtype=dtype) as vllm_model:
        params = SamplingParams(max_tokens=max_tokens, ignore_eos=True)
        for prompt in example_prompts:
            request_outputs = vllm_model.llm.generate(prompt, sampling_params=params)
            generated_ids = request_outputs[0].outputs[0].token_ids
            # EOS must not have stopped generation early.
            assert len(generated_ids) == max_tokens
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/samplers/test_logprobs.py
tests/samplers/test_logprobs.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Check rank invariants of prompt and sample logprobs returned by vLLM."""

import pytest

from vllm import SamplingParams
from vllm.logprobs import FlatLogprobs

MODELS = ["distilbert/distilgpt2"]
MAX_TOKENS = 5
NUM_TOP_LOGPROBS = 5
NUM_PROMPT_LOGPROBS = 7
MAX_LOGPROBS = max(NUM_TOP_LOGPROBS, NUM_PROMPT_LOGPROBS)


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("greedy", [True, False])
@pytest.mark.parametrize("flat_logprobs", [True, False])
def test_ranks(
    vllm_runner,
    model,
    dtype,
    greedy,
    flat_logprobs,
    example_prompts,
):
    """Verify logprob container types, chosen-token ranks and top-k coverage."""
    with vllm_runner(model, dtype=dtype, max_logprobs=MAX_LOGPROBS) as vllm_model:
        tokenizer = vllm_model.llm.get_tokenizer()
        example_prompt_tokens = [tokenizer.encode(prompt) for prompt in example_prompts]
        sampling_params = SamplingParams(
            temperature=0.0 if greedy else 1.0,
            top_p=1.0,
            max_tokens=MAX_TOKENS,
            logprobs=NUM_TOP_LOGPROBS,
            prompt_logprobs=NUM_PROMPT_LOGPROBS,
            flat_logprobs=flat_logprobs,
        )
        results = vllm_model.generate_w_logprobs(example_prompts, sampling_params)

        assert len(results) == len(example_prompt_tokens)
        # NOTE: the original indexed results with enumerate but never used the
        # index; iterate with zip directly.
        for result, prompt_tokens in zip(results, example_prompt_tokens):
            decode_tokens, _, decode_logprobs, prompt_logprobs = result

            # Ensure the return type of logprobs is accurate
            assert isinstance(prompt_logprobs, FlatLogprobs if flat_logprobs else list)
            assert isinstance(decode_logprobs, FlatLogprobs if flat_logprobs else list)

            ########################
            # Check prompt logprobs
            ########################
            assert len(prompt_tokens) == len(prompt_logprobs)
            # No logprob for first prompt token
            assert not prompt_logprobs[0]
            for token, logprobs in zip(prompt_tokens[1:], prompt_logprobs[1:]):
                # Ensure logprobs of prompt token is always returned
                logprob = logprobs.get(token)
                assert logprob is not None
                assert logprob.rank >= 1
                # Ensure # of returned logprobs should be
                # either NUM_PROMPT_LOGPROBS or NUM_PROMPT_LOGPROBS+1
                assert NUM_PROMPT_LOGPROBS <= len(logprobs) <= NUM_PROMPT_LOGPROBS + 1
                # Ensure top NUM_PROMPT_LOGPROBS is always extracted
                assert set(range(1, NUM_PROMPT_LOGPROBS + 1)).issubset(
                    {logprob.rank for logprob in logprobs.values()}
                )

            ########################
            # Check sample logprobs
            ########################
            assert len(decode_tokens) == len(decode_logprobs)
            for token, logprobs in zip(decode_tokens, decode_logprobs):
                # Ensure logprobs of chosen token is always returned
                logprob = logprobs.get(token)
                assert logprob is not None
                if greedy:
                    # For greedy sampling, all chosen logprob should be top ranked
                    assert logprob.rank == 1
                else:
                    assert logprob.rank >= 1
                # Ensure # of returned logprobs should be
                # either NUM_TOP_LOGPROBS or NUM_TOP_LOGPROBS+1
                assert NUM_TOP_LOGPROBS <= len(logprobs) <= NUM_TOP_LOGPROBS + 1
                # Ensure top NUM_TOP_LOGPROBS logprobs is always extracted
                assert set(range(1, NUM_TOP_LOGPROBS + 1)).issubset(
                    {logprob.rank for logprob in logprobs.values()}
                )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/samplers/__init__.py
tests/samplers/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/samplers/test_no_bad_words.py
tests/samplers/test_no_bad_words.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Make sure bad_words works.

Run `pytest tests/samplers/test_no_bad_words.py`.
"""

from transformers import AutoTokenizer

from vllm import LLM, SamplingParams


def _generate(
    llm: LLM,
    prompt: str,
    num_prompt_tokens: int,
    temperature: float = 0,
    bad_words: list[str] | None = None,
) -> list[int]:
    """Generate one completion and return only its token ids (prompt stripped)."""
    sampling_params = SamplingParams(
        temperature=temperature,
        bad_words=bad_words,
    )

    # [([output_token_ids, ], [output_text, ]), ]
    output = llm.generate([prompt], sampling_params=sampling_params)

    output_token_ids = output[0][0][0][num_prompt_tokens:]
    # [0] first (and only) request output
    # [0] token_ids (not text)
    # [0] first (and only) output completion
    return output_token_ids


class TestOneTokenBadWord:
    """Ban a single-token bad word and check it never appears."""

    MODEL = "hmellor/tiny-random-LlamaForCausalLM"

    PROMPT = "How old are "
    # Greedy continuation of PROMPT under this tiny random model - TODO confirm
    TARGET_TOKEN = "mn"

    def setup_method(self, method):
        # Fresh tokenizer per test; precompute prompt length and target id.
        self.tokenizer = AutoTokenizer.from_pretrained(self.MODEL)

        self.num_prompt_tokens = len(self._encode(self.PROMPT))
        self.target_token_id = self._encode(
            self.TARGET_TOKEN, add_special_tokens=False
        )[0]

    def test_one_token_bad_word(self, vllm_runner):
        with vllm_runner(self.MODEL) as llm:
            # Without bad_words the target token is the greedy first token.
            output_token_ids = self._generate(llm)
            assert output_token_ids[0] == self.target_token_id

            # With the word banned it must never be emitted.
            output_token_ids = self._generate(llm, bad_words=[self.TARGET_TOKEN])
            assert self.target_token_id not in output_token_ids

    def _generate(self, llm: LLM, bad_words: list[str] | None = None) -> list[int]:
        """Delegate to the module-level helper with this test's prompt."""
        return _generate(
            llm=llm,
            prompt=self.PROMPT,
            num_prompt_tokens=self.num_prompt_tokens,
            bad_words=bad_words,
        )

    def _encode(self, prompt: str, add_special_tokens: bool = True) -> list[int]:
        return self.tokenizer(prompt, add_special_tokens=add_special_tokens).input_ids


class TestTwoTokenBadWord:
    """Ban multi-token phrases and check sub-phrase / neighbour behaviour."""

    # Another model (with a different tokenizer behaviour)
    MODEL = "distilbert/distilgpt2"

    PROMPT = "How old are you? I am 10"
    TARGET_TOKEN1 = "years"
    TARGET_TOKEN2 = "old"
    # Token sharing a prefix with TARGET_TOKEN2; the model falls back to it.
    NEIGHBOUR_TOKEN2 = "older"

    def setup_method(self, method):
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.MODEL, add_prefix_space=True
        )

        self.num_prompt_tokens = len(self._encode(self.PROMPT))
        self.target_token_id1 = self._encode(
            self.TARGET_TOKEN1, add_special_tokens=False
        )[0]
        self.target_token_id2 = self._encode(
            self.TARGET_TOKEN2, add_special_tokens=False
        )[0]
        self.neighbour_token_id2 = self._encode(
            self.NEIGHBOUR_TOKEN2, add_special_tokens=False
        )[0]

    def test_two_token_bad_word(self, vllm_runner):
        with vllm_runner(self.MODEL, dtype="half") as llm:
            # Baseline: greedy continuation starts with "years old".
            output_token_ids = self._generate(llm)
            assert output_token_ids[:2] == [
                self.target_token_id1,
                self.target_token_id2,
            ]

            # Banning the first word alone removes it entirely.
            output_token_ids = self._generate(llm, bad_words=[self.TARGET_TOKEN1])
            assert self.target_token_id1 not in output_token_ids

            # Banning the second word alone keeps the first, removes the second.
            output_token_ids = self._generate(llm, bad_words=[self.TARGET_TOKEN2])
            assert output_token_ids[0] == self.target_token_id1
            assert self.target_token_id2 not in output_token_ids

            # Banning the two-token phrase blocks the pair but not "years" itself.
            output_token_ids = self._generate(
                llm, bad_words=[f"{self.TARGET_TOKEN1} {self.TARGET_TOKEN2}"]
            )
            assert output_token_ids[0] == self.target_token_id1
            assert output_token_ids[:2] != [
                self.target_token_id1,
                self.target_token_id2,
            ]
            assert not self._contains(
                output_token_ids, [self.target_token_id1, self.target_token_id2]
            )
            # Model dependent behaviour
            assert output_token_ids[:2] == [
                self.target_token_id1,
                self.neighbour_token_id2,
            ]

            # Banning both phrases blocks both pairs; the banned tokens may
            # still appear in other (non-phrase) positions.
            output_token_ids = self._generate(
                llm,
                bad_words=[
                    f"{self.TARGET_TOKEN1} {self.TARGET_TOKEN2}",
                    f"{self.TARGET_TOKEN1} {self.NEIGHBOUR_TOKEN2}",
                ],
            )
            assert output_token_ids[0] == self.target_token_id1
            assert output_token_ids[:2] != [
                self.target_token_id1,
                self.target_token_id2,
            ]
            assert not self._contains(
                output_token_ids, [self.target_token_id1, self.target_token_id2]
            )
            assert output_token_ids[:2] != [
                self.target_token_id1,
                self.neighbour_token_id2,
            ]
            assert not self._contains(
                output_token_ids, [self.target_token_id1, self.neighbour_token_id2]
            )
            assert (self.target_token_id2 in output_token_ids) or (
                self.neighbour_token_id2 in output_token_ids
            )

    def _generate(self, llm: LLM, bad_words: list[str] | None = None) -> list[int]:
        """Delegate to the module-level helper with this test's prompt."""
        return _generate(
            llm=llm,
            prompt=self.PROMPT,
            num_prompt_tokens=self.num_prompt_tokens,
            bad_words=bad_words,
        )

    @staticmethod
    def _contains(sequence: list[int], subsequence: list[int]) -> bool:
        """Return True if `subsequence` occurs contiguously in `sequence`."""
        # `searched` guards against vacuous success when the sequence is
        # shorter than the subsequence at every start offset.
        searched = False
        for start in range(len(sequence)):
            end = start + len(subsequence)
            current_subsequence = sequence[start:end]

            if len(current_subsequence) < len(subsequence):
                continue

            searched = True
            assert len(current_subsequence) == len(subsequence)

            if current_subsequence == subsequence:
                return True

        assert searched, "All subsequences did not match in length..."

        return False

    def _encode(self, prompt: str, add_special_tokens: bool = True) -> list[int]:
        return self.tokenizer(prompt, add_special_tokens=add_special_tokens).input_ids
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_sparse_tensor_validation_unit.py
tests/multimodal/test_sparse_tensor_validation_unit.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for sparse tensor validation.

Simple, fast unit tests that can run without server fixtures.

Run with: pytest tests/multimodal/test_sparse_tensor_validation_unit.py -v
"""

import io

import pytest
import torch


class TestSparseTensorValidationContextManager:
    """Test that torch.sparse.check_sparse_tensor_invariants() works as expected."""

    def test_valid_sparse_tensor_passes(self):
        """Valid sparse tensors should pass validation."""
        indices = torch.tensor([[0, 1], [0, 1]])
        values = torch.tensor([1.0, 2.0])
        shape = (2, 2)

        with torch.sparse.check_sparse_tensor_invariants():
            tensor = torch.sparse_coo_tensor(indices, values, shape)
            dense = tensor.to_dense()
            assert dense.shape == shape

    def test_out_of_bounds_indices_rejected(self):
        """Sparse tensors with out-of-bounds indices should be rejected."""
        indices = torch.tensor([[5], [5]])  # Out of bounds for 2x2
        values = torch.tensor([1.0])
        shape = (2, 2)

        with pytest.raises(RuntimeError) as exc_info:  # noqa: SIM117
            with torch.sparse.check_sparse_tensor_invariants():
                tensor = torch.sparse_coo_tensor(indices, values, shape)
                tensor.to_dense()

        # Error text varies by torch version; accept either wording.
        assert (
            "index" in str(exc_info.value).lower()
            or "bound" in str(exc_info.value).lower()
        )

    def test_negative_indices_rejected(self):
        """Sparse tensors with negative indices should be rejected."""
        indices = torch.tensor([[-1], [0]])
        values = torch.tensor([1.0])
        shape = (2, 2)

        with pytest.raises(RuntimeError):  # noqa: SIM117
            with torch.sparse.check_sparse_tensor_invariants():
                tensor = torch.sparse_coo_tensor(indices, values, shape)
                tensor.to_dense()

    def test_without_context_manager_allows_invalid(self):
        """
        WITHOUT validation, invalid tensors may not immediately error.

        This demonstrates the vulnerability: PyTorch 2.8.0+ doesn't validate
        by default, which can lead to memory corruption.
        """
        indices = torch.tensor([[100], [100]])  # Way out of bounds
        values = torch.tensor([1.0])
        shape = (2, 2)

        # Without validation context, this might create an invalid tensor
        # (actual behavior depends on PyTorch version)
        tensor = torch.sparse_coo_tensor(indices, values, shape)

        # The tensor object is created, but it's invalid
        assert tensor.is_sparse


class TestTorchLoadWithValidation:
    """Test torch.load() with sparse tensor validation."""

    def test_load_valid_sparse_tensor_with_validation(self):
        """Valid sparse tensors should load successfully with validation."""
        # Create and save a valid sparse tensor
        indices = torch.tensor([[0, 1], [0, 1]])
        values = torch.tensor([1.0, 2.0])
        tensor = torch.sparse_coo_tensor(indices, values, (2, 2))

        buffer = io.BytesIO()
        torch.save(tensor, buffer)
        buffer.seek(0)

        # Load with validation
        with torch.sparse.check_sparse_tensor_invariants():
            loaded = torch.load(buffer, weights_only=True)
            dense = loaded.to_dense()
            assert dense.shape == (2, 2)

    def test_load_invalid_sparse_tensor_rejected(self):
        """Invalid sparse tensors should be caught when loaded with validation."""
        # Create an invalid sparse tensor (out of bounds)
        indices = torch.tensor([[10], [10]])
        values = torch.tensor([1.0])
        tensor = torch.sparse_coo_tensor(indices, values, (2, 2))

        buffer = io.BytesIO()
        torch.save(tensor, buffer)
        buffer.seek(0)

        # Load with validation - should fail on to_dense()
        with pytest.raises(RuntimeError):  # noqa: SIM117
            with torch.sparse.check_sparse_tensor_invariants():
                loaded = torch.load(buffer, weights_only=True)
                loaded.to_dense()

    def test_load_dense_tensor_unaffected(self):
        """Dense tensors should work normally with the validation context."""
        # Create and save a dense tensor
        tensor = torch.randn(10, 20)

        buffer = io.BytesIO()
        torch.save(tensor, buffer)
        buffer.seek(0)

        # Load with validation (should have no effect on dense tensors)
        with torch.sparse.check_sparse_tensor_invariants():
            loaded = torch.load(buffer, weights_only=True)
            assert loaded.shape == (10, 20)
            assert not loaded.is_sparse


if __name__ == "__main__":
    # Allow running directly for quick testing
    pytest.main([__file__, "-v", "--tb=short"])
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_registry.py
tests/multimodal/test_registry.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for MultiModalRegistry.supports_multimodal_inputs and
Qwen2.5-VL visual component loading behavior.
"""

import pytest

from vllm.multimodal import MULTIMODAL_REGISTRY

from ..models.utils import build_model_context

pytestmark = pytest.mark.cpu_test


@pytest.mark.parametrize(
    "model_id,limit_mm_per_prompt,expected",
    [
        ("Qwen/Qwen2-0.5B-Instruct", {}, False),
        ("Qwen/Qwen2.5-VL-3B-Instruct", {}, True),
        ("Qwen/Qwen2.5-VL-3B-Instruct", {"image": 0, "video": 0}, False),
        ("Qwen/Qwen2.5-VL-3B-Instruct", {"image": 0}, True),
    ],
)
@pytest.mark.core_model
def test_supports_multimodal_inputs(model_id, limit_mm_per_prompt, expected):
    """Test supports_multimodal_inputs returns correct boolean for various configs."""
    model_ctx = build_model_context(
        model_id,
        limit_mm_per_prompt=limit_mm_per_prompt,
    )
    supported = MULTIMODAL_REGISTRY.supports_multimodal_inputs(model_ctx.model_config)
    assert supported is expected
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_utils.py
tests/multimodal/test_utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for MediaConnector fetching and multimodal placeholder utilities."""

import asyncio
import base64
import mimetypes
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory

import numpy as np
import pytest
import torch
from PIL import Image, ImageChops

from vllm.multimodal.image import convert_image_mode
from vllm.multimodal.inputs import PlaceholderRange
from vllm.multimodal.utils import MediaConnector, argsort_mm_positions

# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA)
TEST_IMAGE_ASSETS = [
    "2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
    # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
    "Grayscale_8bits_palette_sample_image.png",
    # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/Grayscale_8bits_palette_sample_image.png",
    "1280px-Venn_diagram_rgb.svg.png",
    # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/1280px-Venn_diagram_rgb.svg.png",
    "RGBA_comp.png",
    # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/RGBA_comp.png",
]

TEST_VIDEO_URLS = [
    "https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4",
    "https://github.com/opencv/opencv/raw/refs/tags/4.12.0/samples/data/vtest.avi",
]


@pytest.fixture(scope="module")
def url_images(local_asset_server) -> dict[str, Image.Image]:
    return {
        image_url: local_asset_server.get_image_asset(image_url)
        for image_url in TEST_IMAGE_ASSETS
    }


def get_supported_suffixes() -> tuple[str, ...]:
    # We should at least test the file types mentioned in GPT-4 with Vision
    OPENAI_SUPPORTED_SUFFIXES = (".png", ".jpeg", ".jpg", ".webp", ".gif")

    # Additional file types that are supported by us
    EXTRA_SUPPORTED_SUFFIXES = (".bmp", ".tiff")

    return OPENAI_SUPPORTED_SUFFIXES + EXTRA_SUPPORTED_SUFFIXES


def _image_equals(a: Image.Image, b: Image.Image) -> bool:
    return (np.asarray(a) == np.asarray(convert_image_mode(b, a.mode))).all()


@pytest.mark.asyncio
@pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
async def test_fetch_image_http(image_url: str):
    connector = MediaConnector()

    image_sync = connector.fetch_image(image_url)
    image_async = await connector.fetch_image_async(image_url)
    assert _image_equals(image_sync, image_async)


@pytest.mark.asyncio
@pytest.mark.parametrize("raw_image_url", TEST_IMAGE_ASSETS)
@pytest.mark.parametrize("suffix", get_supported_suffixes())
async def test_fetch_image_base64(
    url_images: dict[str, Image.Image], raw_image_url: str, suffix: str
):
    connector = MediaConnector(
        # Domain restriction should not apply to data URLs.
        allowed_media_domains=[
            "www.bogotobogo.com",
            "github.com",
        ]
    )
    url_image = url_images[raw_image_url]

    try:
        mime_type = Image.MIME[Image.registered_extensions()[suffix]]
    except KeyError:
        try:
            mime_type = mimetypes.types_map[suffix]
        except KeyError:
            pytest.skip("No MIME type")

    with NamedTemporaryFile(suffix=suffix) as f:
        try:
            url_image.save(f.name)
        except Exception as e:
            if e.args[0] == "cannot write mode RGBA as JPEG":
                pytest.skip("Conversion not supported")
            raise

        base64_image = base64.b64encode(f.read()).decode("utf-8")
        data_url = f"data:{mime_type};base64,{base64_image}"

        data_image_sync = connector.fetch_image(data_url)
        if _image_equals(url_image, Image.open(f)):
            assert _image_equals(url_image, data_image_sync)
        else:
            pass  # Lossy format; only check that image can be opened

        data_image_async = await connector.fetch_image_async(data_url)
        assert _image_equals(data_image_sync, data_image_async)


@pytest.mark.asyncio
@pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
async def test_fetch_image_local_files(image_url: str):
    connector = MediaConnector()

    with TemporaryDirectory() as temp_dir:
        local_connector = MediaConnector(allowed_local_media_path=temp_dir)

        origin_image = connector.fetch_image(image_url)
        origin_image.save(
            os.path.join(temp_dir, os.path.basename(image_url)),
            quality=100,
            icc_profile=origin_image.info.get("icc_profile"),
        )

        image_async = await local_connector.fetch_image_async(
            f"file://{temp_dir}/{os.path.basename(image_url)}"
        )
        image_sync = local_connector.fetch_image(
            f"file://{temp_dir}/{os.path.basename(image_url)}"
        )
        # Check that the images are equal
        assert not ImageChops.difference(image_sync, image_async).getbbox()

        with pytest.raises(ValueError, match="must be a subpath"):
            await local_connector.fetch_image_async(
                f"file://{temp_dir}/../{os.path.basename(image_url)}"
            )
        with pytest.raises(RuntimeError, match="Cannot load local files"):
            await connector.fetch_image_async(
                f"file://{temp_dir}/../{os.path.basename(image_url)}"
            )

        with pytest.raises(ValueError, match="must be a subpath"):
            local_connector.fetch_image(
                f"file://{temp_dir}/../{os.path.basename(image_url)}"
            )
        with pytest.raises(RuntimeError, match="Cannot load local files"):
            connector.fetch_image(f"file://{temp_dir}/../{os.path.basename(image_url)}")


@pytest.mark.asyncio
@pytest.mark.parametrize("image_url", [TEST_IMAGE_ASSETS[0]], indirect=True)
async def test_fetch_image_local_files_with_space_in_name(image_url: str):
    connector = MediaConnector()

    with TemporaryDirectory() as temp_dir:
        local_connector = MediaConnector(allowed_local_media_path=temp_dir)

        origin_image = connector.fetch_image(image_url)
        filename = "file name with space.jpg"
        origin_image.save(
            os.path.join(temp_dir, filename),
            quality=100,
            icc_profile=origin_image.info.get("icc_profile"),
        )

        try:
            # BUGFIX: fetch the file that was actually saved (the URL
            # previously did not reference `filename`, so the test could
            # never exercise the space-in-name path).
            image_async = await local_connector.fetch_image_async(
                f"file://{temp_dir}/{filename}"
            )
            image_sync = local_connector.fetch_image(f"file://{temp_dir}/{filename}")
        except FileNotFoundError as e:
            pytest.fail("Failed to fetch image with space in name: {}".format(e))

        # Check that the images are equal
        assert not ImageChops.difference(image_sync, image_async).getbbox()


@pytest.mark.asyncio
async def test_fetch_image_error_conversion():
    connector = MediaConnector()
    broken_img = "data:image/png;base64,aGVsbG9fdmxsbV9jb21tdW5pdHkK"

    # PIL.UnidentifiedImageError should be converted to ValueError
    with pytest.raises(ValueError):
        await connector.fetch_image_async(broken_img)

    with pytest.raises(ValueError):
        connector.fetch_image(broken_img)


@pytest.mark.flaky(reruns=3, reruns_delay=5)
@pytest.mark.asyncio
@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
@pytest.mark.parametrize("num_frames", [-1, 32, 1800])
async def test_fetch_video_http(video_url: str, num_frames: int):
    connector = MediaConnector(
        media_io_kwargs={
            "video": {
                "num_frames": num_frames,
            }
        }
    )

    try:
        video_sync, metadata_sync = connector.fetch_video(video_url)
        video_async, metadata_async = await connector.fetch_video_async(video_url)
    except (TimeoutError, asyncio.TimeoutError) as e:
        pytest.skip(f"Timeout fetching video (CI network flakiness): {e}")

    assert np.array_equal(video_sync, video_async)
    assert metadata_sync == metadata_async


@pytest.mark.asyncio
@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
@pytest.mark.parametrize("max_duration", [1, 60, 1800])
@pytest.mark.parametrize("requested_fps", [2, 24])
async def test_fetch_video_http_with_dynamic_loader(
    video_url: str,
    max_duration: int,
    requested_fps: int,
    monkeypatch: pytest.MonkeyPatch,
):
    with monkeypatch.context() as m:
        m.setenv("VLLM_VIDEO_LOADER_BACKEND", "opencv_dynamic")
        connector = MediaConnector(
            media_io_kwargs={
                "video": {
                    "max_duration": max_duration,
                    "requested_fps": requested_fps,
                }
            }
        )

        video_sync, metadata_sync = connector.fetch_video(video_url)
        video_async, metadata_async = await connector.fetch_video_async(video_url)

        assert np.array_equal(video_sync, video_async)
        assert metadata_sync == metadata_async
        assert metadata_sync["video_backend"] == "opencv_dynamic"


@pytest.mark.parametrize(
    "case",
    [
        # Single modality
        ## Internally sorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=0, length=2),
                    PlaceholderRange(offset=3, length=2),
                ]
            },
            expected_modality_idxs=[
                ("image", 0),
                ("image", 1),
            ],
        ),
        ## Internally unsorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=3, length=2),
                    PlaceholderRange(offset=0, length=2),
                ]
            },
            expected_modality_idxs=[
                ("image", 1),
                ("image", 0),
            ],
        ),
        # Two modalities
        ## Internally sorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=7, length=4),
                    PlaceholderRange(offset=11, length=5),
                ],
                "audio": [
                    PlaceholderRange(offset=0, length=2),
                    PlaceholderRange(offset=2, length=3),
                ],
            },
            expected_modality_idxs=[
                ("audio", 0),
                ("audio", 1),
                ("image", 0),
                ("image", 1),
            ],
        ),
        ## Interleaved, internally sorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=0, length=4),
                    PlaceholderRange(offset=8, length=2),
                ],
                "audio": [
                    PlaceholderRange(offset=5, length=2),
                    PlaceholderRange(offset=11, length=4),
                ],
            },
            expected_modality_idxs=[
                ("image", 0),
                ("audio", 0),
                ("image", 1),
                ("audio", 1),
            ],
        ),
        ## Interleaved, internally unsorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=8, length=2),
                    PlaceholderRange(offset=0, length=4),
                ],
                "audio": [
                    PlaceholderRange(offset=11, length=4),
                    PlaceholderRange(offset=5, length=2),
                ],
            },
            expected_modality_idxs=[
                ("image", 1),
                ("audio", 1),
                ("image", 0),
                ("audio", 0),
            ],
        ),
        # Three modalities
        ## Internally sorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=15, length=7),
                    PlaceholderRange(offset=22, length=8),
                ],
                "audio": [
                    PlaceholderRange(offset=0, length=2),
                ],
                "video": [
                    PlaceholderRange(offset=3, length=4),
                    PlaceholderRange(offset=7, length=5),
                    PlaceholderRange(offset=12, length=6),
                ],
            },
            expected_modality_idxs=[
                ("audio", 0),
                ("video", 0),
                ("video", 1),
                ("video", 2),
                ("image", 0),
                ("image", 1),
            ],
        ),
        ## Interleaved, internally sorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=0, length=2),
                    PlaceholderRange(offset=2, length=3),
                    PlaceholderRange(offset=20, length=4),
                ],
                "audio": [
                    PlaceholderRange(offset=5, length=2),
                ],
                "video": [
                    PlaceholderRange(offset=8, length=5),
                ],
            },
            expected_modality_idxs=[
                ("image", 0),
                ("image", 1),
                ("audio", 0),
                ("video", 0),
                ("image", 2),
            ],
        ),
        ## Interleaved, internally unsorted
        dict(
            mm_positions={
                "image": [
                    PlaceholderRange(offset=0, length=2),
                    PlaceholderRange(offset=20, length=4),
                    PlaceholderRange(offset=2, length=3),
                ],
                "audio": [
                    PlaceholderRange(offset=5, length=2),
                ],
                "video": [
                    PlaceholderRange(offset=8, length=5),
                ],
            },
            expected_modality_idxs=[
                ("image", 0),
                ("image", 2),
                ("audio", 0),
                ("video", 0),
                ("image", 1),
            ],
        ),
    ],
)
def test_argsort_mm_positions(case):
    mm_positions = case["mm_positions"]
    expected_modality_idxs = case["expected_modality_idxs"]

    modality_idxs = argsort_mm_positions(mm_positions)

    assert modality_idxs == expected_modality_idxs


@pytest.mark.parametrize(
    "is_embed,expected",
    [
        (None, 5),
        (torch.tensor([True, True, True, True, True]), 5),
        (torch.tensor([False, False, False, False, False]), 0),
        (torch.tensor([True, False, True, False, True]), 3),
        (torch.tensor([True]), 1),
    ],
)
def test_placeholder_range_get_num_embeds(is_embed, expected):
    length = len(is_embed) if is_embed is not None else 5
    pr = PlaceholderRange(offset=0, length=length, is_embed=is_embed)
    assert pr.get_num_embeds == expected


@pytest.mark.parametrize(
    "is_embed,expected",
    [
        (None, None),
        (
            torch.tensor([False, True, False, True, True]),
            torch.tensor([0, 1, 1, 2, 3]),
        ),
        (torch.tensor([True, True, True]), torch.tensor([1, 2, 3])),
    ],
)
def test_placeholder_range_embeds_cumsum(is_embed, expected):
    length = len(is_embed) if is_embed is not None else 5
    pr = PlaceholderRange(offset=0, length=length, is_embed=is_embed)
    if expected is None:
        assert pr.embeds_cumsum is None
        return
    assert torch.equal(pr.embeds_cumsum, expected)
    # cached_property should return the same object on repeated access
    assert pr.embeds_cumsum is pr.embeds_cumsum


@pytest.mark.parametrize(
    "is_embed,start_idx,end_idx,expected",
    [
        (None, 2, 4, (2, 4)),
        (
            torch.tensor([False, True, False, True, True]),
            3,
            5,
            (1, 3),
        ),
        (
            torch.tensor([False, True, False, True, True]),
            0,
            2,
            (0, 1),
        ),
        (
            torch.tensor([True, False, True, False]),
            2,
            2,
            (1, 1),
        ),
    ],
)
def test_placeholder_range_get_embeds_indices_in_range(
    is_embed, start_idx, end_idx, expected
):
    length = len(is_embed) if is_embed is not None else 5
    pr = PlaceholderRange(offset=0, length=length, is_embed=is_embed)
    assert pr.get_embeds_indices_in_range(start_idx, end_idx) == expected


@pytest.mark.parametrize(
    "offset,is_embed,expected",
    [
        (0, None, [(0, 4)]),
        (
            2,
            torch.tensor([False, True, False, True, True]),
            [(3, 3), (5, 6)],
        ),
        (0, torch.tensor([True, True, True, True]), [(0, 3)]),
        (0, torch.tensor([False, False, False, False]), []),
    ],
)
def test_placeholder_range_extract_embeds_range(offset, is_embed, expected):
    length = len(is_embed) if is_embed is not None else 5
    pr = PlaceholderRange(offset=offset, length=length, is_embed=is_embed)
    assert pr.extract_embeds_range() == expected


@pytest.mark.asyncio
@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
@pytest.mark.parametrize("num_frames", [-1, 32, 1800])
async def test_allowed_media_domains(video_url: str, num_frames: int):
    connector = MediaConnector(
        media_io_kwargs={
            "video": {
                "num_frames": num_frames,
            }
        },
        allowed_media_domains=[
            "www.bogotobogo.com",
            "github.com",
        ],
    )

    video_sync, metadata_sync = connector.fetch_video(video_url)
    video_async, metadata_async = await connector.fetch_video_async(video_url)
    assert np.array_equal(video_sync, video_async)
    assert metadata_sync == metadata_async

    disallowed_url = "https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png"
    with pytest.raises(ValueError):
        _, _ = connector.fetch_video(disallowed_url)

    with pytest.raises(ValueError):
        _, _ = await connector.fetch_video_async(disallowed_url)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_video.py
tests/multimodal/test_video.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import tempfile from pathlib import Path import numpy as np import numpy.typing as npt import pytest from PIL import Image from vllm.assets.base import get_vllm_public_assets from vllm.assets.video import video_to_ndarrays, video_to_pil_images_list from vllm.multimodal.image import ImageMediaIO from vllm.multimodal.video import VIDEO_LOADER_REGISTRY, VideoLoader, VideoMediaIO from .utils import cosine_similarity, create_video_from_image, normalize_image pytestmark = pytest.mark.cpu_test ASSETS_DIR = Path(__file__).parent / "assets" NUM_FRAMES = 10 FAKE_OUTPUT_1 = np.random.rand(NUM_FRAMES, 1280, 720, 3) FAKE_OUTPUT_2 = np.random.rand(NUM_FRAMES, 1280, 720, 3) @VIDEO_LOADER_REGISTRY.register("test_video_loader_1") class TestVideoLoader1(VideoLoader): @classmethod def load_bytes(cls, data: bytes, num_frames: int = -1) -> npt.NDArray: return FAKE_OUTPUT_1 @VIDEO_LOADER_REGISTRY.register("test_video_loader_2") class TestVideoLoader2(VideoLoader): @classmethod def load_bytes(cls, data: bytes, num_frames: int = -1) -> npt.NDArray: return FAKE_OUTPUT_2 def test_video_loader_registry(): custom_loader_1 = VIDEO_LOADER_REGISTRY.load("test_video_loader_1") output_1 = custom_loader_1.load_bytes(b"test") np.testing.assert_array_equal(output_1, FAKE_OUTPUT_1) custom_loader_2 = VIDEO_LOADER_REGISTRY.load("test_video_loader_2") output_2 = custom_loader_2.load_bytes(b"test") np.testing.assert_array_equal(output_2, FAKE_OUTPUT_2) def test_video_loader_type_doesnt_exist(): with pytest.raises(AssertionError): VIDEO_LOADER_REGISTRY.load("non_existing_video_loader") @VIDEO_LOADER_REGISTRY.register("assert_10_frames_1_fps") class Assert10Frames1FPSVideoLoader(VideoLoader): @classmethod def load_bytes( cls, data: bytes, num_frames: int = -1, fps: float = -1.0, **kwargs ) -> npt.NDArray: assert num_frames == 10, "bad num_frames" assert fps == 1.0, "bad fps" return FAKE_OUTPUT_2 def 
test_video_media_io_kwargs(monkeypatch: pytest.MonkeyPatch): with monkeypatch.context() as m: m.setenv("VLLM_VIDEO_LOADER_BACKEND", "assert_10_frames_1_fps") imageio = ImageMediaIO() # Verify that different args pass/fail assertions as expected. videoio = VideoMediaIO(imageio, **{"num_frames": 10, "fps": 1.0}) _ = videoio.load_bytes(b"test") videoio = VideoMediaIO( imageio, **{"num_frames": 10, "fps": 1.0, "not_used": "not_used"} ) _ = videoio.load_bytes(b"test") with pytest.raises(AssertionError, match="bad num_frames"): videoio = VideoMediaIO(imageio, **{}) _ = videoio.load_bytes(b"test") with pytest.raises(AssertionError, match="bad num_frames"): videoio = VideoMediaIO(imageio, **{"num_frames": 9, "fps": 1.0}) _ = videoio.load_bytes(b"test") with pytest.raises(AssertionError, match="bad fps"): videoio = VideoMediaIO(imageio, **{"num_frames": 10, "fps": 2.0}) _ = videoio.load_bytes(b"test") @pytest.mark.parametrize("is_color", [True, False]) @pytest.mark.parametrize("fourcc, ext", [("mp4v", "mp4"), ("XVID", "avi")]) def test_opencv_video_io_colorspace(is_color: bool, fourcc: str, ext: str): """ Test all functions that use OpenCV for video I/O return RGB format. Both RGB and grayscale videos are tested. 
""" image_path = get_vllm_public_assets( filename="stop_sign.jpg", s3_prefix="vision_model_images" ) image = Image.open(image_path) with tempfile.TemporaryDirectory() as tmpdir: if not is_color: image_path = f"{tmpdir}/test_grayscale_image.png" image = image.convert("L") image.save(image_path) # Convert to gray RGB for comparison image = image.convert("RGB") video_path = f"{tmpdir}/test_RGB_video.{ext}" create_video_from_image( image_path, video_path, num_frames=2, is_color=is_color, fourcc=fourcc, ) frames = video_to_ndarrays(video_path) for frame in frames: sim = cosine_similarity( normalize_image(np.array(frame)), normalize_image(np.array(image)) ) assert np.sum(np.isnan(sim)) / sim.size < 0.001 assert np.nanmean(sim) > 0.99 pil_frames = video_to_pil_images_list(video_path) for frame in pil_frames: sim = cosine_similarity( normalize_image(np.array(frame)), normalize_image(np.array(image)) ) assert np.sum(np.isnan(sim)) / sim.size < 0.001 assert np.nanmean(sim) > 0.99 io_frames, _ = VideoMediaIO(ImageMediaIO()).load_file(Path(video_path)) for frame in io_frames: sim = cosine_similarity( normalize_image(np.array(frame)), normalize_image(np.array(image)) ) assert np.sum(np.isnan(sim)) / sim.size < 0.001 assert np.nanmean(sim) > 0.99 def test_video_backend_handles_broken_frames(monkeypatch: pytest.MonkeyPatch): """ Regression test for handling videos with broken frames. This test uses a pre-corrupted video file (assets/corrupted.mp4) that contains broken frames to verify the video loader handles them gracefully without crashing and returns accurate metadata. 
""" with monkeypatch.context() as m: m.setenv("VLLM_VIDEO_LOADER_BACKEND", "opencv") # Load the pre-corrupted video file that contains broken frames corrupted_video_path = ASSETS_DIR / "corrupted.mp4" with open(corrupted_video_path, "rb") as f: video_data = f.read() loader = VIDEO_LOADER_REGISTRY.load("opencv") frames, metadata = loader.load_bytes(video_data, num_frames=-1) # Verify metadata consistency: # frames_indices must match actual loaded frames assert frames.shape[0] == len(metadata["frames_indices"]), ( f"Frames array size must equal frames_indices length. " f"Got {frames.shape[0]} frames but " f"{len(metadata['frames_indices'])} indices" ) # Verify that broken frames were skipped: # loaded frames should be less than total assert frames.shape[0] < metadata["total_num_frames"], ( f"Should load fewer frames than total due to broken frames. " f"Expected fewer than {metadata['total_num_frames']} frames, " f"but loaded {frames.shape[0]} frames" ) @VIDEO_LOADER_REGISTRY.register("test_video_backend_override_1") class TestVideoBackendOverride1(VideoLoader): """Test loader that returns FAKE_OUTPUT_1 to verify backend selection.""" @classmethod def load_bytes( cls, data: bytes, num_frames: int = -1, **kwargs ) -> tuple[npt.NDArray, dict]: return FAKE_OUTPUT_1, {"video_backend": "test_video_backend_override_1"} @VIDEO_LOADER_REGISTRY.register("test_video_backend_override_2") class TestVideoBackendOverride2(VideoLoader): """Test loader that returns FAKE_OUTPUT_2 to verify backend selection.""" @classmethod def load_bytes( cls, data: bytes, num_frames: int = -1, **kwargs ) -> tuple[npt.NDArray, dict]: return FAKE_OUTPUT_2, {"video_backend": "test_video_backend_override_2"} def test_video_media_io_backend_kwarg_override(monkeypatch: pytest.MonkeyPatch): """ Test that video_backend kwarg can override the VLLM_VIDEO_LOADER_BACKEND environment variable. 
This allows users to dynamically select a different video backend via --media-io-kwargs without changing the global env var, which is useful when plugins set a default backend but a specific request needs a different one. """ with monkeypatch.context() as m: # Set the env var to one backend m.setenv("VLLM_VIDEO_LOADER_BACKEND", "test_video_backend_override_1") imageio = ImageMediaIO() # Without video_backend kwarg, should use env var backend videoio_default = VideoMediaIO(imageio, num_frames=10) frames_default, metadata_default = videoio_default.load_bytes(b"test") np.testing.assert_array_equal(frames_default, FAKE_OUTPUT_1) assert metadata_default["video_backend"] == "test_video_backend_override_1" # With video_backend kwarg, should override env var videoio_override = VideoMediaIO( imageio, num_frames=10, video_backend="test_video_backend_override_2" ) frames_override, metadata_override = videoio_override.load_bytes(b"test") np.testing.assert_array_equal(frames_override, FAKE_OUTPUT_2) assert metadata_override["video_backend"] == "test_video_backend_override_2" def test_video_media_io_backend_kwarg_not_passed_to_loader( monkeypatch: pytest.MonkeyPatch, ): """ Test that video_backend kwarg is consumed by VideoMediaIO and NOT passed through to the underlying video loader's load_bytes method. This ensures the kwarg is properly popped from kwargs before forwarding. 
""" @VIDEO_LOADER_REGISTRY.register("test_reject_video_backend_kwarg") class RejectVideoBackendKwargLoader(VideoLoader): """Test loader that fails if video_backend is passed through.""" @classmethod def load_bytes( cls, data: bytes, num_frames: int = -1, **kwargs ) -> tuple[npt.NDArray, dict]: # This should never receive video_backend in kwargs if "video_backend" in kwargs: raise AssertionError( "video_backend should be consumed by VideoMediaIO, " "not passed to loader" ) return FAKE_OUTPUT_1, {"received_kwargs": list(kwargs.keys())} with monkeypatch.context() as m: m.setenv("VLLM_VIDEO_LOADER_BACKEND", "test_reject_video_backend_kwarg") imageio = ImageMediaIO() # Even when video_backend is provided, it should NOT be passed to loader videoio = VideoMediaIO( imageio, num_frames=10, video_backend="test_reject_video_backend_kwarg", other_kwarg="should_pass_through", ) # This should NOT raise AssertionError frames, metadata = videoio.load_bytes(b"test") np.testing.assert_array_equal(frames, FAKE_OUTPUT_1) # Verify other kwargs are still passed through assert "other_kwarg" in metadata["received_kwargs"] def test_video_media_io_backend_env_var_fallback(monkeypatch: pytest.MonkeyPatch): """ Test that when video_backend kwarg is None or not provided, VideoMediaIO falls back to VLLM_VIDEO_LOADER_BACKEND env var. 
""" with monkeypatch.context() as m: m.setenv("VLLM_VIDEO_LOADER_BACKEND", "test_video_backend_override_2") imageio = ImageMediaIO() # Explicit None should fall back to env var videoio_none = VideoMediaIO(imageio, num_frames=10, video_backend=None) frames_none, metadata_none = videoio_none.load_bytes(b"test") np.testing.assert_array_equal(frames_none, FAKE_OUTPUT_2) assert metadata_none["video_backend"] == "test_video_backend_override_2" # Not providing video_backend should also fall back to env var videoio_missing = VideoMediaIO(imageio, num_frames=10) frames_missing, metadata_missing = videoio_missing.load_bytes(b"test") np.testing.assert_array_equal(frames_missing, FAKE_OUTPUT_2) assert metadata_missing["video_backend"] == "test_video_backend_override_2"
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_audio.py
tests/multimodal/test_audio.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # test_audio.py import base64 from pathlib import Path from unittest.mock import patch import numpy as np import pytest from vllm.multimodal.audio import ( AudioMediaIO, AudioResampler, resample_audio_librosa, resample_audio_scipy, ) @pytest.fixture def dummy_audio(): return np.array([0.0, 0.1, 0.2, 0.3, 0.4], dtype=float) def test_resample_audio_librosa(dummy_audio): with patch("vllm.multimodal.audio.librosa.resample") as mock_resample: mock_resample.return_value = dummy_audio * 2 out = resample_audio_librosa(dummy_audio, orig_sr=44100, target_sr=22050) mock_resample.assert_called_once_with( dummy_audio, orig_sr=44100, target_sr=22050 ) assert np.all(out == dummy_audio * 2) def test_resample_audio_scipy(dummy_audio): out_down = resample_audio_scipy(dummy_audio, orig_sr=4, target_sr=2) out_up = resample_audio_scipy(dummy_audio, orig_sr=2, target_sr=4) out_same = resample_audio_scipy(dummy_audio, orig_sr=4, target_sr=4) assert len(out_down) == 3 assert len(out_up) == 10 assert np.all(out_same == dummy_audio) @pytest.mark.xfail(reason="resample_audio_scipy is buggy for non-integer ratios") def test_resample_audio_scipy_non_integer_ratio(dummy_audio): out = resample_audio_scipy(dummy_audio, orig_sr=5, target_sr=3) expected_len = int(round(len(dummy_audio) * 3 / 5)) assert len(out) == expected_len assert isinstance(out, np.ndarray) assert np.isfinite(out).all() def test_audio_resampler_librosa_calls_resample(dummy_audio): resampler = AudioResampler(target_sr=22050, method="librosa") with patch("vllm.multimodal.audio.resample_audio_librosa") as mock_resample: mock_resample.return_value = dummy_audio out = resampler.resample(dummy_audio, orig_sr=44100) mock_resample.assert_called_once_with( dummy_audio, orig_sr=44100, target_sr=22050 ) assert np.all(out == dummy_audio) def test_audio_resampler_scipy_calls_resample(dummy_audio): resampler = 
AudioResampler(target_sr=22050, method="scipy") with patch("vllm.multimodal.audio.resample_audio_scipy") as mock_resample: mock_resample.return_value = dummy_audio out = resampler.resample(dummy_audio, orig_sr=44100) mock_resample.assert_called_once_with( dummy_audio, orig_sr=44100, target_sr=22050 ) assert np.all(out == dummy_audio) def test_audio_resampler_invalid_method(dummy_audio): resampler = AudioResampler(target_sr=22050, method="invalid") with pytest.raises(ValueError): resampler.resample(dummy_audio, orig_sr=44100) def test_audio_resampler_no_target_sr(dummy_audio): resampler = AudioResampler(target_sr=None) with pytest.raises(RuntimeError): resampler.resample(dummy_audio, orig_sr=44100) @pytest.fixture def dummy_audio_bytes(): return b"FAKEAUDIOBYTES" def test_audio_media_io_load_bytes(dummy_audio_bytes): audio_io = AudioMediaIO() with patch("vllm.multimodal.audio.librosa.load") as mock_load: mock_load.return_value = (np.array([0.1, 0.2]), 16000) out = audio_io.load_bytes(dummy_audio_bytes) mock_load.assert_called_once() assert isinstance(out[0], np.ndarray) assert out[1] == 16000 def test_audio_media_io_load_base64(dummy_audio_bytes): audio_io = AudioMediaIO() encoded = base64.b64encode(dummy_audio_bytes).decode("utf-8") with patch.object(AudioMediaIO, "load_bytes") as mock_load_bytes: mock_load_bytes.return_value = (np.array([0.1, 0.2]), 16000) out = audio_io.load_base64("audio/wav", encoded) mock_load_bytes.assert_called_once() assert isinstance(out[0], np.ndarray) assert out[1] == 16000 def test_audio_media_io_load_file(): audio_io = AudioMediaIO() path = Path("/fake/path.wav") with patch("vllm.multimodal.audio.librosa.load") as mock_load: mock_load.return_value = (np.array([0.1, 0.2]), 16000) out = audio_io.load_file(path) mock_load.assert_called_once_with(path, sr=None) assert isinstance(out[0], np.ndarray) assert out[1] == 16000 def test_audio_media_io_encode_base64(dummy_audio): audio_io = AudioMediaIO() media = (dummy_audio, 16000) with 
patch("vllm.multimodal.audio.soundfile.write") as mock_write: def write_to_buffer(buffer, *_args, **_kwargs): buffer.write(b"dummy_wav_data") mock_write.side_effect = write_to_buffer out = audio_io.encode_base64(media) decoded = base64.b64decode(out) assert decoded == b"dummy_wav_data" mock_write.assert_called_once()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_cache.py
tests/multimodal/test_cache.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import multiprocessing as mp import numpy as np import pytest import torch from vllm.config import ModelConfig, ParallelConfig, VllmConfig from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.cache import ( BaseMultiModalProcessorCache, BaseMultiModalReceiverCache, MultiModalCache, MultiModalProcessorCacheInItem, MultiModalProcessorCacheItem, MultiModalProcessorCacheItemMetadata, MultiModalProcessorSenderCache, MultiModalReceiverCache, ShmObjectStoreReceiverCache, ShmObjectStoreSenderCache, engine_receiver_cache_from_config, processor_cache_from_config, ) from vllm.multimodal.hasher import MultiModalHasher from vllm.multimodal.inputs import ( MultiModalFieldElem, MultiModalKwargsItem, MultiModalKwargsItems, MultiModalSharedField, ) from vllm.multimodal.processing import PromptInsertion from vllm.utils.mem_constants import GiB_bytes, MiB_bytes pytestmark = pytest.mark.cpu_test def _dummy_elem( modality: str, key: str, size: int, *, rng: np.random.RandomState | None = None, ): if rng is None: data = torch.empty((size,), dtype=torch.int8) else: data = torch.from_numpy(rng.randint(4, size=(size,), dtype=np.int8)) return MultiModalFieldElem( modality=modality, key=key, data=data, field=MultiModalSharedField(batch_size=1), ) def _dummy_item( modality: str, size_by_key: dict[str, int], *, rng: np.random.RandomState | None = None, ): return MultiModalKwargsItem.from_elems( [_dummy_elem(modality, key, size, rng=rng) for key, size in size_by_key.items()] ) def _dummy_items( size_by_key_modality: dict[str, dict[str, int]], *, rng: np.random.RandomState | None = None, ): return MultiModalKwargsItems.from_seq( [ _dummy_item(modality, size_by_key, rng=rng) for modality, size_by_key in size_by_key_modality.items() ] ) @pytest.mark.parametrize( ("item", "expected_size"), [ (_dummy_item("a", {"a1": 100}), 100), (_dummy_item("a", {"a1": 100, "a2": 110}), 210), 
(_dummy_items({"a": {"a1": 100, "a2": 110}, "b": {"b1": 120, "b2": 130}}), 460), # noqa: E501 ], ) def test_cache_item_size(item, expected_size): cache = MultiModalCache.get_lru_cache(2048, type(item)) cache[""] = item assert cache.currsize == expected_size prompt_update = PromptInsertion("dummy", "target", "insertion").resolve(0) cache[""] = MultiModalProcessorCacheItem(item, [prompt_update]) assert cache.currsize == expected_size cache[""] = MultiModalProcessorCacheItemMetadata(item, [prompt_update]) assert cache.currsize == expected_size cache[""] = item.get_data() assert cache.currsize == expected_size def _create_vllm_config( *, mm_processor_cache_gb: float, enable_ipc: bool, ): return VllmConfig( model_config=ModelConfig( model="llava-hf/llava-onevision-qwen2-0.5b-ov-hf", mm_processor_cache_gb=mm_processor_cache_gb, ), parallel_config=ParallelConfig(data_parallel_size=1 if enable_ipc else 2), ) def _compare_caches( config_0: VllmConfig, config_1: VllmConfig, *, item_capacity: int = 8, hit_rate: float = 0.5, max_items_per_iter: int = 3, is_cached_calls_per_iter: int, n_iter: int = 100, seed: int = 0, ): cache_0_p0 = processor_cache_from_config(config_0, MULTIMODAL_REGISTRY) cache_0_p1 = engine_receiver_cache_from_config(config_0, MULTIMODAL_REGISTRY) cache_1_p0 = processor_cache_from_config(config_1, MULTIMODAL_REGISTRY) cache_1_p1 = engine_receiver_cache_from_config(config_1, MULTIMODAL_REGISTRY) cache_size_gb = max( config_0.model_config.multimodal_config.mm_processor_cache_gb, config_1.model_config.multimodal_config.mm_processor_cache_gb, ) item_size_gb = int(cache_size_gb / item_capacity) rng = np.random.RandomState(seed) all_items = [ _dummy_item("item", {"key": item_size_gb}, rng=rng) for _ in range(int(item_capacity / hit_rate)) ] all_hashes = [ MultiModalHasher.hash_kwargs(item=item.get_data()) for item in all_items ] prompt_update = PromptInsertion("dummy", "target", "insertion").resolve(0) for it in range(n_iter): num_items_to_select = rng.randint(0, 
max_items_per_iter) item_idxs_to_select = rng.choice(len(all_items), num_items_to_select) selected_items = [all_items[idx] for idx in item_idxs_to_select] selected_hashes = [all_hashes[idx] for idx in item_idxs_to_select] if cache_0_p0 is None: cache_0_p0_out = selected_items else: for _ in range(is_cached_calls_per_iter): cache_0_p0.is_cached(selected_hashes) cache_0_p0_out = [ item for item, _ in cache_0_p0.get_and_update( [(item, [prompt_update]) for item in selected_items], selected_hashes, ) ] if cache_1_p0 is None: cache_1_p0_out = selected_items else: for _ in range(is_cached_calls_per_iter): cache_1_p0.is_cached(selected_hashes) cache_1_p0_out = [ item for item, _ in cache_1_p0.get_and_update( [(item, [prompt_update]) for item in selected_items], selected_hashes, ) ] if cache_0_p1 is None: cache_0_p1_out = cache_0_p0_out else: cache_0_p1_out = cache_0_p1.get_and_update(cache_0_p0_out, selected_hashes) if cache_1_p1 is None: cache_1_p1_out = cache_1_p0_out else: cache_1_p1_out = cache_1_p1.get_and_update(cache_1_p0_out, selected_hashes) assert cache_0_p1_out == cache_1_p1_out, f"Failed at {it=}" @pytest.mark.parametrize("is_cached_calls_per_iter", [1, 2, 3]) def test_ipc_enable_disable_consistency(is_cached_calls_per_iter): cache_size_gb = 1 / (1 << 20) vllm_config_ipc_enabled = _create_vllm_config( mm_processor_cache_gb=cache_size_gb, enable_ipc=True, ) vllm_config_ipc_disabled = _create_vllm_config( mm_processor_cache_gb=0, enable_ipc=False, ) vllm_config_cache_disabled = _create_vllm_config( mm_processor_cache_gb=cache_size_gb, enable_ipc=True, ) _compare_caches( vllm_config_ipc_enabled, vllm_config_ipc_disabled, is_cached_calls_per_iter=is_cached_calls_per_iter, ) _compare_caches( vllm_config_ipc_disabled, vllm_config_cache_disabled, is_cached_calls_per_iter=is_cached_calls_per_iter, ) _compare_caches( vllm_config_cache_disabled, vllm_config_ipc_enabled, is_cached_calls_per_iter=is_cached_calls_per_iter, ) def _run_test_cache_eviction_lru( p0_cache: 
BaseMultiModalProcessorCache, p1_cache: BaseMultiModalReceiverCache, base_item_size: int, ): request1_hashes = [ "image_A", "image_B", "image_C", ] request1_items = { h: MultiModalKwargsItem.dummy(h, nbytes=2 * base_item_size) for h in request1_hashes } request2_hashes = ["image_D", "image_E", "image_A", "image_C"] request2_items = { h: MultiModalKwargsItem.dummy(h, nbytes=1 * base_item_size) for h in request2_hashes } ########################## # STEP 1: Request 1 send ########################## sender_is_cached_item_req1 = p0_cache.is_cached(request1_hashes) # Cache is empty assert sender_is_cached_item_req1 == [False, False, False] # Touch all mm hash for P0 Cache before process for mm_hash in request1_hashes: p0_cache.touch_sender_cache_item(mm_hash) ########################### # Process request 1 for P0 Cache ########################### item_tuple: MultiModalProcessorCacheInItem for i, h in enumerate(request1_hashes): # Use precomputed cache state is_cached = sender_is_cached_item_req1[i] item_tuple = (request1_items[h], []) if not is_cached else None print(f"Request 1: key={h} | cached={is_cached}") p0_cache.get_and_update_item(item_tuple, h) ########################### # Process request 1 for P1 Cache ########################### # Touch all mm hash for P1 Cache before process for mm_hash in request1_hashes: p1_cache.touch_receiver_cache_item(mm_hash) for h in request1_hashes: p1_cache.get_and_update_item(request1_items[h], h) expected_hashes = ["image_A", "image_B", "image_C"] assert list(p0_cache._cache.order) == expected_hashes ########################## # STEP 2: Request 2 send ########################## sender_is_cached_item_req2 = p0_cache.is_cached(request2_hashes) assert sender_is_cached_item_req2 == [False, False, True, True] # Touch all mm hash for P0 Cache before process for mm_hash in request2_hashes: p0_cache.touch_sender_cache_item(mm_hash) ########################### # Process request 2 for P0 Cache ########################### for i, h in 
enumerate(request2_hashes): # Use precomputed cache state again is_cached = sender_is_cached_item_req2[i] item_tuple = (request2_items[h], []) if not is_cached else None print(f"Request 2: key={h} | cached={is_cached}") p0_cache.get_and_update_item(item_tuple, h) ########################### # Process request 2 for P1 Cache ########################### # Touch all mm hash for P1 Cache before process for mm_hash in request2_hashes: p1_cache.touch_receiver_cache_item(mm_hash) for h in request2_hashes: p1_cache.get_and_update_item(request2_items[h], h) expected_hashes = ["image_D", "image_E", "image_A", "image_C"] assert list(p0_cache._cache.order) == expected_hashes def test_cache_eviction_lru_cache(): model_config = ModelConfig( model="llava-hf/llava-onevision-qwen2-0.5b-ov-hf", mm_processor_cache_gb=6 / GiB_bytes, ) sender_cache = MultiModalProcessorSenderCache(model_config) receiver_cache = MultiModalReceiverCache(model_config) _run_test_cache_eviction_lru(sender_cache, receiver_cache, base_item_size=1) # This test verifies shared-memory cache eviction behavior across processor (p0) # and receiver (p1) caches. # Flow summary: # 1. Request 1 adds images A, B, C — completely filling the cache. # 2. Request 2 tries to add image_G and image_A, but image_G cannot be added because # cache is full and A is protected from eviction — cache remains unchanged. # 3. Request 3 adds image_G, image_H, image_I and image_B # this time, image_A is evicted, freeing 5MB space # and image_G, image_H successfully fits, # image_B is protected from eviction then image_i cannot be added. # This proving normal eviction and reuse behavior. 
def _run_test_cache_eviction_shm(
    p0_cache: BaseMultiModalProcessorCache,
    p1_cache: BaseMultiModalReceiverCache,
    base_item_size: int,
):
    """Exercise eviction behavior of the shared-memory (shm) cache pair.

    Drives three requests through a sender (P0) / receiver (P1) cache pair:

    1. Request 1 inserts images A, B, C (5 * ``base_item_size`` each),
       completely filling the cache.
    2. Request 2 tries to add image_G alongside a reuse of image_A; image_A
       is protected (touched by the in-flight request), so image_G cannot be
       allocated and the cache state is unchanged.
    3. Request 3 references G, H, I, B; image_B is protected, image_A is
       evicted (freeing 5 units), G and H fit, and I does not.

    Item sizes are multiples of ``base_item_size`` so the caller controls the
    absolute scale (see ``test_cache_eviction_shm_cache``).
    """
    # Request 1: three 5-unit items — together they fill the cache budget.
    request1_hashes = ["image_A", "image_B", "image_C"]
    request1_items = {
        h: MultiModalKwargsItem.dummy(h, nbytes=5 * base_item_size)
        for h in request1_hashes
    }
    request1_items_p0_result = []

    # Request 2: new 2-unit item G plus a reuse of the 5-unit item A.
    request2_hashes = ["image_G", "image_A"]
    request2_items = {
        h: MultiModalKwargsItem.dummy(
            h, nbytes=(5 if h in request1_hashes else 2) * base_item_size
        )
        for h in request2_hashes
    }
    request2_items_p0_result = []

    # Request 3: new 2-unit items G, H, I plus a reuse of the 5-unit item B.
    request3_hashes = ["image_G", "image_H", "image_I", "image_B"]
    request3_items = {
        h: MultiModalKwargsItem.dummy(
            h, nbytes=(5 if h in request1_hashes else 2) * base_item_size
        )
        for h in request3_hashes
    }
    request3_items_p0_result = []

    ##########################
    # STEP 1: Request 1 send
    # This will fill up the cache
    ##########################
    sender_is_cached_item_req1 = p0_cache.is_cached(request1_hashes)
    # Cache is empty
    assert sender_is_cached_item_req1 == [False, False, False]

    # Touch all mm hash for P0 Cache before process
    for mm_hash in request1_hashes:
        p0_cache.touch_sender_cache_item(mm_hash)

    ###########################
    # Process request 1 for P0 Cache
    ###########################
    item_tuple: MultiModalProcessorCacheInItem
    for i, h in enumerate(request1_hashes):
        # Use precomputed cache state (mirrors how the scheduler batches
        # is_cached queries up front rather than re-querying per item)
        is_cached = sender_is_cached_item_req1[i]
        # A cached item is transferred as None (hash-only reference).
        item_tuple = (request1_items[h], []) if not is_cached else None
        print(f"Request 1: key={h} | cached={is_cached}")
        p0_result = p0_cache.get_and_update_item(item_tuple, h)
        # Only get mm item, ignore prompt update result
        request1_items_p0_result.append(p0_result[0])

    ###########################
    # Process request 1 for P1 Cache
    ###########################
    # Touch all mm hash for P1 Cache before process
    for mm_hash, mm_item in zip(request1_hashes, request1_items_p0_result):
        p1_cache.touch_receiver_cache_item(mm_hash, mm_item)

    for mm_hash, mm_item in zip(request1_hashes, request1_items_p0_result):
        p1_cache.get_and_update_item(mm_item, mm_hash)

    # The shm cache index should now contain exactly A, B, C in order.
    expected_hashes = ["image_A", "image_B", "image_C"]
    assert list(p0_cache._shm_cache.key_index.keys()) == expected_hashes

    ##########################
    # STEP 2: Request 2 send
    # There is no eviction because image_A is protected
    # No new item can add to cache
    ##########################
    sender_is_cached_item_req2 = p0_cache.is_cached(request2_hashes)
    assert sender_is_cached_item_req2 == [False, True]

    # Touch all mm hash for P0 Cache before process
    for mm_hash in request2_hashes:
        p0_cache.touch_sender_cache_item(mm_hash)

    ###########################
    # Process request 2 for P0 Cache
    ###########################
    for i, h in enumerate(request2_hashes):
        # Use precomputed cache state again
        is_cached = sender_is_cached_item_req2[i]
        item_tuple = (request2_items[h], []) if not is_cached else None
        print(f"Request 2: key={h} | cached={is_cached}")
        p0_result = p0_cache.get_and_update_item(item_tuple, h)
        # Only get mm item, ignore prompt update result
        request2_items_p0_result.append(p0_result[0])

    # image_A cannot be evict then
    # image_G will fail to allocate anyway and image_A still in cache
    assert p0_cache.is_cached(request2_hashes) == [False, True]

    ###########################
    # Process request 2 for P1 Cache
    ###########################
    # Touch all mm hash for P1 Cache before process
    for mm_hash, mm_item in zip(request2_hashes, request2_items_p0_result):
        p1_cache.touch_receiver_cache_item(mm_hash, mm_item)

    for mm_hash, mm_item in zip(request2_hashes, request2_items_p0_result):
        p1_cache.get_and_update_item(mm_item, mm_hash)

    # Prove that cache state is unchanged
    expected_hashes = ["image_A", "image_B", "image_C"]
    assert list(p0_cache._shm_cache.key_index.keys()) == expected_hashes

    ##########################
    # STEP 3: Request 3 send
    ##########################
    ##### Prove that cache eviction work normally
    sender_is_cached_item_req3 = p0_cache.is_cached(request3_hashes)
    assert sender_is_cached_item_req3 == [False, False, False, True]

    # Touch all mm hash for P0 Cache before process
    for mm_hash in request3_hashes:
        p0_cache.touch_sender_cache_item(mm_hash)

    ###########################
    # Process request 3 for P0 Cache
    ###########################
    for i, h in enumerate(request3_hashes):
        # Use precomputed cache state again
        is_cached = sender_is_cached_item_req3[i]
        item_tuple = (request3_items[h], []) if not is_cached else None
        print(f"Request 3: key={h} | cached={is_cached}")
        p0_result = p0_cache.get_and_update_item(item_tuple, h)
        # Only get mm item, ignore prompt update result
        request3_items_p0_result.append(p0_result[0])

    # image_A got evict and image_G add to cache
    # image_B is still protected
    # image_G, image_H fit but image_I cannot fit
    assert p0_cache.is_cached(request3_hashes) == [True, True, False, True]

    ###########################
    # Process request 3 for P1 Cache
    ###########################
    # Touch all mm hash for P1 Cache before process
    for mm_hash, mm_item in zip(request3_hashes, request3_items_p0_result):
        p1_cache.touch_receiver_cache_item(mm_hash, mm_item)

    for mm_hash, mm_item in zip(request3_hashes, request3_items_p0_result):
        p1_cache.get_and_update_item(mm_item, mm_hash)

    expected_hashes = ["image_B", "image_C", "image_G", "image_H"]
    assert list(p0_cache._shm_cache.key_index.keys()) == expected_hashes


def test_cache_eviction_shm_cache():
    """Run the shm eviction scenario with MiB-scaled items.

    The 15.2 MiB budget with 5 MiB items is sized so request 1 fills the
    cache exactly, which the step-by-step assertions in
    ``_run_test_cache_eviction_shm`` depend on.
    """
    vllm_config = VllmConfig(
        model_config=ModelConfig(
            model="llava-hf/llava-onevision-qwen2-0.5b-ov-hf",
            mm_processor_cache_type="shm",
            mm_shm_cache_max_object_size_mb=6,
            mm_processor_cache_gb=15.2 * MiB_bytes / GiB_bytes,
        ),
    )
    sender_cache = ShmObjectStoreSenderCache(vllm_config)
    # The receiver cache shares a lock, presumably for cross-process
    # coordination with the sender — TODO confirm against the cache impl.
    receiver_cache = ShmObjectStoreReceiverCache(vllm_config, mp.Lock())
    _run_test_cache_eviction_shm(sender_cache, receiver_cache, base_item_size=MiB_bytes)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/utils.py
tests/multimodal/utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import cv2
import numpy as np
import numpy.typing as npt
from PIL import Image


def random_image(rng: np.random.RandomState, min_wh: int, max_wh: int):
    """Build a PIL image of random size filled with random RGB data."""
    width, height = rng.randint(min_wh, max_wh, size=(2,))
    pixels = rng.randint(0, 255, size=(width, height, 3), dtype=np.uint8)
    return Image.fromarray(pixels)


def random_video(
    rng: np.random.RandomState,
    min_frames: int,
    max_frames: int,
    min_wh: int,
    max_wh: int,
):
    """Build a random uint8 video array of shape (frames, w, h, 3)."""
    frame_count = rng.randint(min_frames, max_frames)
    width, height = rng.randint(min_wh, max_wh, size=(2,))
    return rng.randint(0, 255, size=(frame_count, width, height, 3), dtype=np.uint8)


def random_audio(
    rng: np.random.RandomState,
    min_len: int,
    max_len: int,
    sr: int,
):
    """Build a random waveform of random length, returned with its sample rate."""
    num_samples = rng.randint(min_len, max_len)
    return rng.rand(num_samples), sr


def create_video_from_image(
    image_path: str,
    video_path: str,
    num_frames: int = 10,
    fps: float = 1.0,
    is_color: bool = True,
    fourcc: str = "mp4v",
):
    """Write a video that repeats a single still image for ``num_frames`` frames.

    When ``is_color`` is False the frame is converted to grayscale before
    writing. Returns ``video_path`` for convenience.
    """
    frame = cv2.imread(image_path)

    if is_color:
        height, width, _ = frame.shape
    else:
        # Grayscale frames have no channel dimension.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = frame.shape

    writer = cv2.VideoWriter(
        video_path,
        cv2.VideoWriter_fourcc(*fourcc),
        fps,
        (width, height),
        isColor=is_color,
    )
    for _ in range(num_frames):
        writer.write(frame)
    writer.release()

    return video_path


def cosine_similarity(A: npt.NDArray, B: npt.NDArray, axis: int = -1) -> npt.NDArray:
    """Compute cosine similarity between two vectors."""
    dot = np.sum(A * B, axis=axis)
    norm_product = np.linalg.norm(A, axis=axis) * np.linalg.norm(B, axis=axis)
    return dot / norm_product


def normalize_image(image: npt.NDArray) -> npt.NDArray:
    """Normalize image to [0, 1] range."""
    return image.astype(np.float32) / 255.0
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_image.py
tests/multimodal/test_image.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path

import numpy as np
import pytest
from PIL import Image, ImageChops

from vllm.multimodal.image import ImageMediaIO, convert_image_mode

pytestmark = pytest.mark.cpu_test

# Test fixtures (PNG files) checked into the repo next to this test.
ASSETS_DIR = Path(__file__).parent / "assets"
assert ASSETS_DIR.exists()


def test_rgb_to_rgb():
    """Converting an RGB image to RGB must not alter any pixel."""
    # Start with an RGB image.
    original_image = Image.open(ASSETS_DIR / "image1.png").convert("RGB")
    converted_image = convert_image_mode(original_image, "RGB")

    # RGB to RGB should be a no-op.
    diff = ImageChops.difference(original_image, converted_image)
    assert diff.getbbox() is None


def test_rgba_to_rgb():
    """Fully transparent RGBA pixels become white (the default background)."""
    original_image = Image.open(ASSETS_DIR / "rgba.png")
    original_image_numpy = np.array(original_image)

    converted_image = convert_image_mode(original_image, "RGB")
    converted_image_numpy = np.array(converted_image)

    for i in range(original_image_numpy.shape[0]):
        for j in range(original_image_numpy.shape[1]):
            # Verify that all transparent pixels are converted to white.
            if original_image_numpy[i][j][3] == 0:
                assert converted_image_numpy[i][j][0] == 255
                assert converted_image_numpy[i][j][1] == 255
                assert converted_image_numpy[i][j][2] == 255


def test_rgba_to_rgb_custom_background(tmp_path):
    """Test RGBA to RGB conversion with custom background colors."""
    # Create a simple RGBA image with transparent and opaque pixels
    rgba_image = Image.new("RGBA", (10, 10), (255, 0, 0, 255))  # Red with full opacity

    # Make top-left quadrant transparent
    for i in range(5):
        for j in range(5):
            rgba_image.putpixel((i, j), (0, 0, 0, 0))  # Fully transparent

    # Save the test image to tmp_path
    test_image_path = tmp_path / "test_rgba.png"
    rgba_image.save(test_image_path)

    # Test 1: Default white background (backward compatibility)
    image_io_default = ImageMediaIO()
    converted_default = image_io_default.load_file(test_image_path)
    default_numpy = np.array(converted_default)

    # Check transparent pixels are white
    assert default_numpy[0][0][0] == 255  # R
    assert default_numpy[0][0][1] == 255  # G
    assert default_numpy[0][0][2] == 255  # B

    # Check opaque pixels remain red
    assert default_numpy[5][5][0] == 255  # R
    assert default_numpy[5][5][1] == 0  # G
    assert default_numpy[5][5][2] == 0  # B

    # Test 2: Custom black background via kwargs
    image_io_black = ImageMediaIO(rgba_background_color=(0, 0, 0))
    converted_black = image_io_black.load_file(test_image_path)
    black_numpy = np.array(converted_black)

    # Check transparent pixels are black
    assert black_numpy[0][0][0] == 0  # R
    assert black_numpy[0][0][1] == 0  # G
    assert black_numpy[0][0][2] == 0  # B

    # Check opaque pixels remain red
    assert black_numpy[5][5][0] == 255  # R
    assert black_numpy[5][5][1] == 0  # G
    assert black_numpy[5][5][2] == 0  # B

    # Test 3: Custom blue background via kwargs (as list)
    image_io_blue = ImageMediaIO(rgba_background_color=[0, 0, 255])
    converted_blue = image_io_blue.load_file(test_image_path)
    blue_numpy = np.array(converted_blue)

    # Check transparent pixels are blue
    assert blue_numpy[0][0][0] == 0  # R
    assert blue_numpy[0][0][1] == 0  # G
    assert blue_numpy[0][0][2] == 255  # B

    # Test 4: Test with load_bytes method
    with open(test_image_path, "rb") as f:
        image_data = f.read()

    image_io_green = ImageMediaIO(rgba_background_color=(0, 255, 0))
    converted_green = image_io_green.load_bytes(image_data)
    green_numpy = np.array(converted_green)

    # Check transparent pixels are green
    assert green_numpy[0][0][0] == 0  # R
    assert green_numpy[0][0][1] == 255  # G
    assert green_numpy[0][0][2] == 0  # B


def test_rgba_background_color_validation():
    """Test that invalid rgba_background_color values are properly rejected."""
    # Test invalid types
    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color="255,255,255")

    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color=255)

    # Test wrong number of elements
    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color=(255, 255))

    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color=(255, 255, 255, 255))

    # Test non-integer values
    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color=(255.0, 255.0, 255.0))

    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color=(255, "255", 255))

    # Test out of range values
    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color=(256, 255, 255))

    with pytest.raises(
        ValueError, match="rgba_background_color must be a list or tuple"
    ):
        ImageMediaIO(rgba_background_color=(255, -1, 255))

    # Test that valid values work
    ImageMediaIO(rgba_background_color=(0, 0, 0))  # Should not raise
    ImageMediaIO(rgba_background_color=[255, 255, 255])  # Should not raise
    ImageMediaIO(rgba_background_color=(128, 128, 128))  # Should not raise
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/__init__.py
tests/multimodal/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_embedding_shape_validation_unit.py
tests/multimodal/test_embedding_shape_validation_unit.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for embedding shape validation.

Simple, fast unit tests that can run without server fixtures.

Run with: pytest tests/multimodal/test_embedding_shape_validation_unit.py -v
"""

import pytest
import torch

from vllm.multimodal.parse import (
    AudioEmbeddingItems,
    ImageEmbeddingItems,
)


class TestImageEmbedBasicValidation:
    """Test basic ndim validation in image embeddings via ImageEmbeddingItems."""

    def test_valid_2d_tensor_accepted(self):
        """Baseline: 2D tensors should be accepted."""
        valid_tensor = torch.randn(10, 768, dtype=torch.float32)

        # Should not raise - 2D is valid
        items = ImageEmbeddingItems(valid_tensor)
        # For a 2D tensor, each row counts as one embedding item.
        assert items.get_count() == 10

    def test_valid_3d_tensor_accepted(self):
        """Baseline: 3D tensors should be accepted."""
        valid_tensor = torch.randn(2, 10, 768, dtype=torch.float32)

        # Should not raise - 3D is valid
        items = ImageEmbeddingItems(valid_tensor)
        # For a 3D tensor, the leading (batch) dimension is the item count.
        assert items.get_count() == 2

    def test_valid_list_of_2d_tensors_accepted(self):
        """Baseline: List of 2D tensors should be accepted."""
        tensors = [
            torch.randn(10, 768, dtype=torch.float32),
            torch.randn(15, 768, dtype=torch.float32),
        ]

        # Should not raise
        items = ImageEmbeddingItems(tensors)
        assert items.get_count() == 2

    def test_1d_tensor_rejected(self):
        """Security: 1D tensors should be rejected (invalid ndim)."""
        invalid_tensor = torch.randn(768, dtype=torch.float32)  # 1D

        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(invalid_tensor)

        assert "must be 2D" in str(exc_info.value) or "3D" in str(exc_info.value)

    def test_4d_tensor_rejected(self):
        """Security: 4D tensors should be rejected (invalid ndim)."""
        invalid_tensor = torch.randn(1, 2, 10, 768, dtype=torch.float32)  # 4D

        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(invalid_tensor)

        assert "must be 2D" in str(exc_info.value) or "3D" in str(exc_info.value)

    def test_hidden_size_validation_correct_size(self):
        """Embeddings with correct hidden size should be accepted."""
        expected_hidden_size = 768
        valid_tensor = torch.randn(10, expected_hidden_size, dtype=torch.float32)

        # Should not raise
        items = ImageEmbeddingItems(
            valid_tensor, expected_hidden_size=expected_hidden_size
        )
        assert items.get_count() == 10

    def test_hidden_size_validation_wrong_size_rejected(self):
        """Embeddings with wrong hidden size should be rejected."""
        expected_hidden_size = 768
        wrong_hidden_size = 4096
        invalid_tensor = torch.randn(10, wrong_hidden_size, dtype=torch.float32)

        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(
                invalid_tensor, expected_hidden_size=expected_hidden_size
            )

        # The error message must name both the expected and received sizes.
        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()
        assert str(wrong_hidden_size) in error_msg
        assert str(expected_hidden_size) in error_msg


class TestAudioEmbedBasicValidation:
    """Test basic ndim validation in audio embeddings via AudioEmbeddingItems."""

    def test_valid_2d_tensor_accepted(self):
        """Baseline: 2D tensors should be accepted."""
        valid_tensor = torch.randn(10, 768, dtype=torch.float32)

        # Should not raise - 2D is valid
        items = AudioEmbeddingItems(valid_tensor)
        assert items.get_count() == 10

    def test_valid_3d_tensor_accepted(self):
        """Baseline: 3D tensors should be accepted."""
        valid_tensor = torch.randn(2, 10, 768, dtype=torch.float32)

        # Should not raise - 3D is valid
        items = AudioEmbeddingItems(valid_tensor)
        assert items.get_count() == 2

    def test_valid_list_of_2d_tensors_accepted(self):
        """Baseline: List of 2D tensors should be accepted."""
        tensors = [
            torch.randn(10, 768, dtype=torch.float32),
            torch.randn(15, 768, dtype=torch.float32),
        ]

        # Should not raise
        items = AudioEmbeddingItems(tensors)
        assert items.get_count() == 2

    def test_1d_tensor_rejected(self):
        """Security: 1D tensors should be rejected (invalid ndim)."""
        invalid_tensor = torch.randn(768, dtype=torch.float32)  # 1D

        with pytest.raises(ValueError) as exc_info:
            AudioEmbeddingItems(invalid_tensor)

        assert "must be 2D" in str(exc_info.value) or "3D" in str(exc_info.value)

    def test_scalar_rejected(self):
        """Security: Scalar tensors should be rejected."""
        invalid_tensor = torch.tensor(1.0)  # 0D (scalar)

        with pytest.raises(ValueError):
            AudioEmbeddingItems(invalid_tensor)

    def test_hidden_size_validation_correct_size(self):
        """Embeddings with correct hidden size should be accepted."""
        expected_hidden_size = 768
        valid_tensor = torch.randn(10, expected_hidden_size, dtype=torch.float32)

        # Should not raise
        items = AudioEmbeddingItems(
            valid_tensor, expected_hidden_size=expected_hidden_size
        )
        assert items.get_count() == 10

    def test_hidden_size_validation_wrong_size_rejected(self):
        """Embeddings with wrong hidden size should be rejected."""
        expected_hidden_size = 768
        wrong_hidden_size = 4096
        invalid_tensor = torch.randn(10, wrong_hidden_size, dtype=torch.float32)

        with pytest.raises(ValueError) as exc_info:
            AudioEmbeddingItems(
                invalid_tensor, expected_hidden_size=expected_hidden_size
            )

        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()
        assert str(wrong_hidden_size) in error_msg
        assert str(expected_hidden_size) in error_msg


class TestShapeValidationDoSPrevention:
    """
    Tests for DoS prevention through shape validation.

    Verifies that embeddings with incorrect shapes are rejected early,
    preventing crashes during model inference.
    """

    def test_prevent_crash_from_wrong_shape_image_embeds(self):
        """
        Prevent crash scenario: wrong hidden size in image embeddings.

        Without validation, this would pass initial checks but crash later
        during model forward pass when dimensions don't match.
        """
        expected_hidden_size = 768  # Typical model hidden size
        wrong_hidden_size = 4096  # Wrong size (e.g., Llama-sized)

        wrong_embedding = torch.randn(100, wrong_hidden_size, dtype=torch.float32)

        # Should be rejected at instantiation time, not during inference
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(
                wrong_embedding, expected_hidden_size=expected_hidden_size
            )

        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()
        assert str(expected_hidden_size) in error_msg  # Expected
        assert str(wrong_hidden_size) in error_msg  # Received

    def test_prevent_crash_from_wrong_shape_audio_embeds(self):
        """
        Prevent crash scenario: wrong hidden size in audio embeddings.
        """
        expected_hidden_size = 768
        wrong_hidden_size = 4096

        wrong_embedding = torch.randn(100, wrong_hidden_size, dtype=torch.float32)

        with pytest.raises(ValueError) as exc_info:
            AudioEmbeddingItems(
                wrong_embedding, expected_hidden_size=expected_hidden_size
            )

        error_msg = str(exc_info.value)
        assert "hidden dimension mismatch" in error_msg.lower()

    def test_extremely_large_hidden_size_rejected(self):
        """Security: Prevent DoS from extremely large embeddings."""
        expected_hidden_size = 768
        huge_hidden_size = 100000  # Large but not extreme to avoid test OOM

        invalid_tensor = torch.randn(10, huge_hidden_size, dtype=torch.float32)

        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(
                invalid_tensor, expected_hidden_size=expected_hidden_size
            )

        assert "hidden dimension mismatch" in str(exc_info.value).lower()

    def test_batch_with_mixed_hidden_sizes_rejected(self):
        """All embeddings in a list must have the same hidden size."""
        expected_hidden_size = 768

        # One correct, one wrong
        batch = [
            torch.randn(10, expected_hidden_size, dtype=torch.float32),
            torch.randn(10, expected_hidden_size + 100, dtype=torch.float32),  # Wrong!
        ]

        # Should fail on the second one
        with pytest.raises(ValueError) as exc_info:
            ImageEmbeddingItems(batch, expected_hidden_size=expected_hidden_size)

        assert "hidden dimension mismatch" in str(exc_info.value).lower()


if __name__ == "__main__":
    pytest.main([__file__, "-v", "--tb=short"])
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_hasher.py
tests/multimodal/test_hasher.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import uuid
from pathlib import Path

import numpy as np
import pytest
import torch
from PIL import Image, ImageDraw

from vllm.multimodal.hasher import MultiModalHasher

pytestmark = pytest.mark.cpu_test

ASSETS_DIR = Path(__file__).parent / "assets"
assert ASSETS_DIR.exists()


# NOTE: Images that are the same visually are allowed to have the same hash
@pytest.mark.parametrize("mode_pair", [("1", "L"), ("RGBA", "CMYK")])
def test_hash_collision_image_mode(mode_pair):
    """Images with identical raw data but different color modes must not
    hash to the same value."""
    mode1, mode2 = mode_pair
    image1 = Image.new(mode1, size=(10, 10), color=1)
    image2 = Image.new(mode2, size=(10, 10), color=1)

    hasher = MultiModalHasher
    assert hasher.hash_kwargs(image=image1) != hasher.hash_kwargs(image=image2)


def test_hash_collision_image_palette():
    """Images differing only in their palette must hash differently."""
    # These images differ only in Image.palette._palette
    image1 = Image.open(ASSETS_DIR / "image1.png")
    image2 = Image.open(ASSETS_DIR / "image2.png")

    hasher = MultiModalHasher
    assert hasher.hash_kwargs(image=image1) != hasher.hash_kwargs(image=image2)


def test_hash_collision_image_transpose():
    """A horizontal line in a 10x20 image and a vertical line in a 20x10
    image have the same flattened bytes; their hashes must still differ."""
    image1 = Image.new("1", size=(10, 20))
    ImageDraw.Draw(image1).line([(0, 0), (10, 0)])

    image2 = Image.new("1", size=(20, 10))
    ImageDraw.Draw(image2).line([(0, 0), (0, 10)])

    hasher = MultiModalHasher
    assert hasher.hash_kwargs(image=image1) != hasher.hash_kwargs(image=image2)


@pytest.mark.parametrize("dtype", [torch.float32, torch.bfloat16])
def test_hash_collision_tensor_shape(dtype):
    # The hash should be different though the data is the same when flattened
    arr1 = torch.zeros((5, 10, 20, 3), dtype=dtype)
    arr2 = torch.zeros((10, 20, 5, 3), dtype=dtype)

    hasher = MultiModalHasher
    assert hasher.hash_kwargs(data=arr1) != hasher.hash_kwargs(data=arr2)


def test_hash_collision_array_shape():
    # The hash should be different though the data is the same when flattened
    arr1 = np.zeros((5, 10, 20, 3))
    arr2 = np.zeros((10, 20, 5, 3))

    hasher = MultiModalHasher
    assert hasher.hash_kwargs(data=arr1) != hasher.hash_kwargs(data=arr2)


def test_hash_non_contiguous_array():
    """Hashing must be layout-independent: a transposed (non-contiguous)
    array and its contiguous copy hash identically."""
    arr = np.arange(24).reshape(4, 6).T
    assert not arr.flags.c_contiguous
    arr_c = np.ascontiguousarray(arr)
    assert arr_c.flags.c_contiguous

    hasher = MultiModalHasher
    # Both should be hashable and produce the same hashes
    assert hasher.hash_kwargs(data=arr) == hasher.hash_kwargs(data=arr_c)


def test_hash_image_exif_id():
    # Test that EXIF ImageId tag can be used to store UUID
    # and the hasher will use that instead of the image data.
    #
    # FIX: renamed the local `id` -> `image_id` (it shadowed the builtin),
    # and removed the pointless `image1 = image2 = ...` chained assignment
    # (image2 was unconditionally reassigned below before any use).
    image1 = Image.new("1", size=(10, 20))
    image_id = uuid.uuid4()
    image1.getexif()[Image.ExifTags.Base.ImageID] = image_id

    image2 = Image.open(ASSETS_DIR / "image1.png")
    image2.getexif()[Image.ExifTags.Base.ImageID] = "Not a UUID"
    image2a = Image.open(ASSETS_DIR / "image1.png")

    hasher = MultiModalHasher
    # first image has UUID in ImageID, so it should hash to that UUID
    assert hasher.hash_kwargs(image=image1) == hasher.hash_kwargs(image=image_id.bytes)
    # second image has non-UUID in ImageID, so it should hash to the image data
    assert hasher.hash_kwargs(image=image2) == hasher.hash_kwargs(image=image2a)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/multimodal/test_processing.py
tests/multimodal/test_processing.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import time from contextlib import nullcontext import numpy as np import pytest from vllm.config import ModelConfig from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.processing import ( InputProcessingContext, PlaceholderFeaturesInfo, PromptIndexTargets, PromptInsertion, PromptReplacement, _apply_matches, apply_text_matches, apply_token_matches, find_mm_placeholders, iter_token_matches, replace_token_matches, ) from vllm.multimodal.profiling import MultiModalProfiler from .utils import random_image pytestmark = pytest.mark.cpu_test @pytest.mark.parametrize( ("token_ids", "match_ids", "expected"), [ ([], [], []), ([], [32000], []), ( [32000, 32000, 32000], [32000], [ {"start_idx": 0, "end_idx": 1}, {"start_idx": 1, "end_idx": 2}, {"start_idx": 2, "end_idx": 3}, ], ), ( [32000, 32000, 32000], [32000, 32000], [{"start_idx": 0, "end_idx": 2}], ), ( [32000, 32000, 32000], [32000, 32000, 32000], [{"start_idx": 0, "end_idx": 3}], ), ( [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], [28747, 32000], [ {"start_idx": 1, "end_idx": 3}, {"start_idx": 6, "end_idx": 8}, ], ), ( [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], [28747, 32000, 32000, 32000], [ {"start_idx": 1, "end_idx": 5}, ], ), ( [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], [28747, 0, 32000], [], ), ], ) @pytest.mark.parametrize("start_idx", [0, 4, 8]) def test_iter_token_matches(token_ids, match_ids, expected, start_idx): result = list(iter_token_matches(token_ids, match_ids, start_idx=start_idx)) # Manually constructed results assert [item._asdict() for item in result] == [ item for item in expected if item["start_idx"] >= start_idx ] # Invariants match_lens = [end - start for start, end in result] print("match_lens:", match_lens) # Only displayed on error assert all(match_len == len(match_ids) for match_len in match_lens) 
@pytest.mark.parametrize( ("token_ids", "match_ids", "new_ids", "expected"), [ ([], [], [-1], []), ([], [32000], [-1], []), ( [32000, 32000, 32000], [32000], [-1], [-1, -1, -1], ), ( [32000, 32000, 32000], [32000, 32000], [-1], [-1, 32000], ), ( [32000, 32000, 32000], [32000, 32000, 32000], [-1], [-1], ), ( [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], [28747, 32000], [-1], [9833, -1, 32000, 32000, 9833, -1, 32000, 918], ), ( [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], [28747, 32000, 32000, 32000], [-1], [9833, -1, 9833, 28747, 32000, 32000, 918], ), ( [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], [28747, 0, 32000], [-1], [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], ), ], ) def test_replace_token_matches(token_ids, match_ids, new_ids, expected): result = replace_token_matches(token_ids, match_ids, new_ids) # Manually constructed results assert result == expected @pytest.mark.parametrize( ("prompt", "target_by_key", "expected_by_key"), [ ( [], { "pattern_1": [], "pattern_2": [32000], "pattern_3": PromptIndexTargets.start(), "pattern_4": PromptIndexTargets.prefix([32000]), "pattern_5": PromptIndexTargets.end(), }, { "pattern_1": [], "pattern_2": [], "pattern_3": [ {"start_idx": 0, "end_idx": 0}, ], "pattern_4": [], "pattern_5": [ {"start_idx": 0, "end_idx": 0}, ], }, ), ( [32000, 32000, 32000, 32000], { "pattern_1": [32000], "pattern_2": [32000, 32000], "pattern_3": [32000, 32000, 32000], "pattern_4": PromptIndexTargets.start(), "pattern_5": PromptIndexTargets.prefix([32000]), "pattern_6": PromptIndexTargets.end(), }, { "pattern_1": [ {"start_idx": 0, "end_idx": 1}, {"start_idx": 1, "end_idx": 2}, {"start_idx": 2, "end_idx": 3}, {"start_idx": 3, "end_idx": 4}, ], "pattern_2": [ {"start_idx": 0, "end_idx": 2}, {"start_idx": 2, "end_idx": 4}, ], "pattern_3": [ {"start_idx": 0, "end_idx": 3}, ], "pattern_4": [ {"start_idx": 0, "end_idx": 0}, ], "pattern_5": [ {"start_idx": 1, 
"end_idx": 1}, ], "pattern_6": [ {"start_idx": 4, "end_idx": 4}, ], }, ), ( [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], { "pattern_1": [28747, 32000], "pattern_2": [28747, 32000, 32000, 32000], "pattern_3": [28747, 0, 32000], "pattern_4": PromptIndexTargets.start(), "pattern_5": PromptIndexTargets.prefix([28747, 32000]), "pattern_6": PromptIndexTargets.end(), }, { "pattern_1": [ {"start_idx": 1, "end_idx": 3}, {"start_idx": 6, "end_idx": 8}, ], "pattern_2": [ {"start_idx": 1, "end_idx": 5}, ], "pattern_3": [], "pattern_4": [ {"start_idx": 0, "end_idx": 0}, ], "pattern_5": [], "pattern_6": [ {"start_idx": 10, "end_idx": 10}, ], }, ), ], ) @pytest.mark.parametrize("update_type", [PromptInsertion, PromptReplacement]) def test_find_token_matches( prompt, target_by_key, expected_by_key, update_type, ): prompt_updates = { key: update_type(key, target, []).resolve(0) for key, target in target_by_key.items() } result = { key: list(update.iter_token_matches(prompt, tokenizer=None)) for key, update in prompt_updates.items() } # Only displayed on error print("result:", result) # Manually constructed results assert { key: [ dict(start_idx=item.start_idx, end_idx=item.end_idx) for item in result.get(key, []) ] for key in expected_by_key } == expected_by_key @pytest.mark.parametrize( ("prompt", "target_by_key", "expected_by_key"), [ # Detokenized test cases of `test_find_token_matches` # using the vocab of llava-hf/llava-v1.6-mistral-7b-hf ( "", { "pattern_1": "", "pattern_2": "<image>", "pattern_3": PromptIndexTargets.start(), "pattern_4": PromptIndexTargets.prefix("<image>"), "pattern_5": PromptIndexTargets.end(), }, { "pattern_1": [{"start_idx": 0, "end_idx": 0}], "pattern_2": [], "pattern_3": [ {"start_idx": 0, "end_idx": 0}, ], "pattern_4": [], "pattern_5": [ {"start_idx": 0, "end_idx": 0}, ], }, ), ( "<image><image><image><image>", { "pattern_1": "<image>", "pattern_2": "<image><image>", "pattern_3": "<image><image><image>", "pattern_4": 
PromptIndexTargets.start(), "pattern_5": PromptIndexTargets.prefix("<image>"), "pattern_6": PromptIndexTargets.end(), }, { "pattern_1": [ {"start_idx": 0, "end_idx": 7}, {"start_idx": 7, "end_idx": 14}, {"start_idx": 14, "end_idx": 21}, {"start_idx": 21, "end_idx": 28}, ], "pattern_2": [ {"start_idx": 0, "end_idx": 14}, {"start_idx": 14, "end_idx": 28}, ], "pattern_3": [ {"start_idx": 0, "end_idx": 21}, ], "pattern_4": [ {"start_idx": 0, "end_idx": 0}, ], "pattern_5": [ {"start_idx": 7, "end_idx": 7}, ], "pattern_6": [ {"start_idx": 28, "end_idx": 28}, ], }, ), ( "Image:<image><image><image>Image:<image><image>!", { "pattern_1": "Image:<image>", "pattern_2": "Image:<image><image><image>", "pattern_3": "Image:<unk><image>", "pattern_4": PromptIndexTargets.start(), "pattern_5": PromptIndexTargets.prefix("Image:<image>"), "pattern_6": PromptIndexTargets.end(), }, { "pattern_1": [ {"start_idx": 0, "end_idx": 13}, {"start_idx": 27, "end_idx": 40}, ], "pattern_2": [ {"start_idx": 0, "end_idx": 27}, ], "pattern_3": [], "pattern_4": [ {"start_idx": 0, "end_idx": 0}, ], "pattern_5": [ {"start_idx": 13, "end_idx": 13}, ], "pattern_6": [ {"start_idx": 48, "end_idx": 48}, ], }, ), # Test regex escape ( "<|image|><image><|image|><image>", { "pattern_1": "<|image|>", "pattern_2": "<|image|><image>", "pattern_3": "<|image|><image><|image|>", }, { "pattern_1": [ {"start_idx": 0, "end_idx": 9}, {"start_idx": 16, "end_idx": 25}, ], "pattern_2": [ {"start_idx": 0, "end_idx": 16}, {"start_idx": 16, "end_idx": 32}, ], "pattern_3": [ {"start_idx": 0, "end_idx": 25}, ], }, ), ], ) @pytest.mark.parametrize("update_type", [PromptInsertion, PromptReplacement]) def test_find_text_matches( prompt, target_by_key, expected_by_key, update_type, ): prompt_updates = { key: update_type(key, target, []).resolve(0) for key, target in target_by_key.items() } result = { key: list(update.iter_text_matches(prompt, tokenizer=None)) for key, update in prompt_updates.items() } # Only displayed on error 
print("result:", result) # Manually constructed results assert { key: [ dict(start_idx=item.start_idx, end_idx=item.end_idx) for item in result.get(key, []) ] for key in expected_by_key } == expected_by_key @pytest.mark.parametrize( ("prompt", "target_by_key", "repl_by_key", "expected_by_update_type_mm_count"), # noqa: E501 [ ( "Image:<image>Image:<image><image>!", { # We use `<image>` before `Image:` to test matches that # occur out of order "pattern_1": "<image>", "pattern_2": "Image:", "pattern_3": "!", }, { # Test whether target is confused with replacement "pattern_1": "<image><image>", # Test empty replacement "pattern_2": "", # Test dynamic replacement (beyond the form of `unit * count`) "pattern_3": "?!?", }, { PromptInsertion: { 0: "Image:<image>Image:<image><image>!", 1: "Image:<image><image><image>Image:<image><image>!?!?", 2: "Image:<image><image><image><image><image>Image:<image><image>!?!??!?", # noqa: E501 }, PromptReplacement: { 0: "Image:<image>Image:<image><image>!", 1: "<image><image>Image:<image><image>?!?", 2: "<image><image><image><image><image>?!?", }, }, ), # Test index targets ( "", { "pattern_1": PromptIndexTargets.start(), "pattern_2": PromptIndexTargets.prefix("<image>"), "pattern_3": PromptIndexTargets.end(), }, { "pattern_1": "1", "pattern_2": "2", "pattern_3": "3", }, { PromptInsertion: { 0: "", 1: "13", 2: "1133", }, PromptReplacement: { 0: "", 1: "13", 2: "1133", }, }, ), ( "<image>", { "pattern_1": PromptIndexTargets.start(), "pattern_2": PromptIndexTargets.prefix("<image>"), "pattern_3": PromptIndexTargets.end(), }, { "pattern_1": "1", "pattern_2": "2", "pattern_3": "3", }, { PromptInsertion: { 0: "<image>", 1: "1<image>23", 2: "11<image>2233", }, PromptReplacement: { 0: "<image>", 1: "1<image>23", 2: "11<image>2233", }, }, ), # Test different replacement per item ( "<image><image><image>", { "pattern_1": "<image>", }, { "pattern_1": lambda idx: str(idx + 1), }, { PromptInsertion: { 0: "<image><image><image>", 1: 
"<image>1<image><image>", 2: "<image>12<image><image>", }, PromptReplacement: { 0: "<image><image><image>", 1: "1<image><image>", 2: "12<image>", }, }, ), ( "<image><image><image>", { "pattern_1": PromptIndexTargets.prefix("<image>"), }, { "pattern_1": lambda idx: str(idx + 1), }, { PromptInsertion: { 0: "<image><image><image>", 1: "<image>1<image><image>", 2: "<image>12<image><image>", }, PromptReplacement: { 0: "<image><image><image>", 1: "<image>1<image><image>", 2: "<image>12<image><image>", }, }, ), ], ) def test_find_update_text( prompt, target_by_key, repl_by_key, expected_by_update_type_mm_count, ): for ( update_type, expected_by_mm_count, ) in expected_by_update_type_mm_count.items(): for mm_count, expected in expected_by_mm_count.items(): mm_prompt_updates = { key: [ [update_type(key, target, repl_by_key[key]).resolve(i)] for i in range(mm_count) ] for key, target in target_by_key.items() } new_prompt, result = apply_text_matches( prompt, mm_prompt_updates, tokenizer=None, ) # Only displayed on error print("update_type:", update_type) print("mm_count:", mm_count) print("mm_prompt_updates:", mm_prompt_updates) print("new_prompt:", new_prompt) print("result:", result) # Manually constructed results assert new_prompt == expected @pytest.mark.parametrize( ("prompt", "target_by_key", "repl_by_key", "expected_by_update_type_mm_count"), # noqa: E501 [ # Tokenized test cases of `test_find_update_text` # using the vocab of llava-hf/llava-v1.6-mistral-7b-hf ( [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918], { # We use `<image>` before `Image:` to test matches that # occur out of order "pattern_1": [32000], "pattern_2": [9833, 28747], "pattern_3": [918], }, { # Test whether target is confused with replacement "pattern_1": [32000, 32000], # Test empty replacement "pattern_2": [], # Test dynamic replacement (beyond the form of `unit * count`) "pattern_3": [1550, 918, 1550], }, { PromptInsertion: { 0: [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918], 1: 
[ 1, 9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918, 1550, 918, 1550, ], # noqa: E501 2: [ 1, 9833, 28747, 32000, 32000, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918, 1550, 918, 1550, 1550, 918, 1550, ], # noqa: E501 }, PromptReplacement: { 0: [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918], 1: [1, 32000, 32000, 9833, 28747, 32000, 32000, 1550, 918, 1550], # noqa: E501 2: [1, 32000, 32000, 32000, 32000, 32000, 1550, 918, 1550], }, }, ), # Test index targets ( [], { "pattern_1": PromptIndexTargets.start(), "pattern_2": PromptIndexTargets.prefix([32000]), "pattern_3": PromptIndexTargets.end(), }, { "pattern_1": [-1], "pattern_2": [-2], "pattern_3": [-3], }, { PromptInsertion: { 0: [], 1: [-1, -3], 2: [-1, -1, -3, -3], }, PromptReplacement: { 0: [], 1: [-1, -3], 2: [-1, -1, -3, -3], }, }, ), ( [32000], { "pattern_1": PromptIndexTargets.start(), "pattern_2": PromptIndexTargets.prefix([32000]), "pattern_3": PromptIndexTargets.end(), }, { "pattern_1": [-1], "pattern_2": [-2], "pattern_3": [-3], }, { PromptInsertion: { 0: [32000], 1: [-1, 32000, -2, -3], 2: [-1, -1, 32000, -2, -2, -3, -3], }, PromptReplacement: { 0: [32000], 1: [-1, 32000, -2, -3], 2: [-1, -1, 32000, -2, -2, -3, -3], }, }, ), # Test different replacement per item ( [32000, 32000, 32000], { "pattern_1": [32000], }, { "pattern_1": lambda idx: [-(idx + 1)], }, { PromptInsertion: { 0: [32000, 32000, 32000], 1: [32000, -1, 32000, 32000], 2: [32000, -1, -2, 32000, 32000], }, PromptReplacement: { 0: [32000, 32000, 32000], 1: [-1, 32000, 32000], 2: [-1, -2, 32000], }, }, ), ( [32000, 32000, 32000], { "pattern_1": PromptIndexTargets.prefix([32000]), }, { "pattern_1": lambda idx: [-(idx + 1)], }, { PromptInsertion: { 0: [32000, 32000, 32000], 1: [32000, -1, 32000, 32000], 2: [32000, -1, -2, 32000, 32000], }, PromptReplacement: { 0: [32000, 32000, 32000], 1: [32000, -1, 32000, 32000], 2: [32000, -1, -2, 32000, 32000], }, }, ), ], ) def test_find_update_tokens( prompt, target_by_key, 
repl_by_key, expected_by_update_type_mm_count, ): for ( update_type, expected_by_mm_count, ) in expected_by_update_type_mm_count.items(): for mm_count, expected in expected_by_mm_count.items(): mm_prompt_updates = { key: [ [update_type(key, target, repl_by_key[key]).resolve(i)] for i in range(mm_count) ] for key, target in target_by_key.items() } new_prompt, result = apply_token_matches( prompt, mm_prompt_updates, tokenizer=None, ) # Only displayed on error print("update_type:", update_type) print("mm_count:", mm_count) print("mm_prompt_updates:", mm_prompt_updates) print("new_prompt:", new_prompt) print("result:", result) # Manually constructed results assert new_prompt == expected @pytest.mark.parametrize( "repl_by_key", [ { "pattern_1": [32000, 32000], "pattern_2": [], "pattern_3": [1550, 918, 1550], # Test different modalities having the same tokens (32000) "pattern_4": [32000], }, ], ) @pytest.mark.parametrize( ("prompt", "expected"), [ ( [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918], { "pattern_1": [ PlaceholderFeaturesInfo( modality="pattern_1", item_idx=0, start_idx=6, tokens=[32000, 32000], is_embed=None, ), ], "pattern_4": [ PlaceholderFeaturesInfo( modality="pattern_4", item_idx=0, start_idx=3, tokens=[32000], is_embed=None, ), ], }, ), ( [1, 32000, 32000, 9833, 28747, 32000, 32000, 1550, 918, 1550], { "pattern_1": [ PlaceholderFeaturesInfo( modality="pattern_1", item_idx=0, start_idx=1, tokens=[32000, 32000], is_embed=None, ), PlaceholderFeaturesInfo( modality="pattern_1", item_idx=1, start_idx=5, tokens=[32000, 32000], is_embed=None, ), ], "pattern_3": [ PlaceholderFeaturesInfo( modality="pattern_3", item_idx=0, start_idx=7, tokens=[1550, 918, 1550], is_embed=None, ), ], # No match for pattern_4 as it has lower priority than pattern_1 }, ), ( [1, 32000, 32000, 32000, 32000, 32000, 1550, 918, 1550], { "pattern_1": [ PlaceholderFeaturesInfo( modality="pattern_1", item_idx=0, start_idx=1, tokens=[32000, 32000], is_embed=None, ), 
PlaceholderFeaturesInfo( modality="pattern_1", item_idx=1, start_idx=3, tokens=[32000, 32000], is_embed=None, ), ], "pattern_4": [ PlaceholderFeaturesInfo( modality="pattern_4", item_idx=0, start_idx=5, tokens=[32000], is_embed=None, ), ], "pattern_3": [ PlaceholderFeaturesInfo( modality="pattern_3", item_idx=0, start_idx=6, tokens=[1550, 918, 1550], is_embed=None, ), ], }, ), ], ) @pytest.mark.parametrize("update_type", [PromptInsertion, PromptReplacement]) def test_find_mm_placeholders( repl_by_key, prompt, expected, update_type, ): mm_prompt_updates = { key: [[update_type(key, [], repl).resolve(i)] for i in range(3)] for key, repl in repl_by_key.items() } result = find_mm_placeholders(prompt, mm_prompt_updates, tokenizer=None) # Only displayed on error print("result:", result) # Manually constructed results assert result == expected @pytest.mark.parametrize("model_id", ["llava-hf/llava-v1.6-mistral-7b-hf"]) @pytest.mark.parametrize( ("limit", "num_supported", "is_valid"), [ (0, 0, True), (0, 1, True), (1, 0, False), (1, 1, True), (1, 2, True), (2, 1, False), (2, 2, True), ], ) def test_limit_mm_per_prompt_dummy(model_id, limit, num_supported, is_valid): limit_mm_per_prompt = {"image": limit} model_config = ModelConfig( model=model_id, limit_mm_per_prompt=limit_mm_per_prompt, ) processor = MULTIMODAL_REGISTRY.create_processor(model_config) processor._supported_mm_limits = {"image": num_supported} profiler = MultiModalProfiler(processor) exc_ctx = nullcontext() if is_valid else pytest.raises(ValueError, match="At most") with exc_ctx: profiler.get_decoder_dummy_data( model_config.max_model_len, mm_counts=limit_mm_per_prompt, ) @pytest.mark.parametrize("model_id", ["llava-hf/llava-v1.6-mistral-7b-hf"]) @pytest.mark.parametrize( ("num_images", "limit", "is_valid"), [ (0, 0, True), (0, 1, True), (1, 0, False), (1, 1, True), (1, 2, True), (2, 1, False), (2, 2, True), ], ) def test_limit_mm_per_prompt_apply(model_id, num_images, limit, is_valid): limit_mm_per_prompt = 
{"image": limit} model_config = ModelConfig( model=model_id, limit_mm_per_prompt=limit_mm_per_prompt, ) processor = MULTIMODAL_REGISTRY.create_processor(model_config) rng = np.random.RandomState(0) image = random_image(rng, min_wh=128, max_wh=256) if num_images == 0: mm_data = {} elif num_images == 1: mm_data = {"image": image} else: mm_data = {"image": [image] * num_images} exc_ctx = nullcontext() if is_valid else pytest.raises(ValueError, match="At most") with exc_ctx: processor.apply( "<image>" * num_images, mm_data=mm_data, hf_processor_mm_kwargs={}, ) class DummyProcessor: def __init__(self, a: int = 0, b: int = 0) -> None: super().__init__() self.a = a self.b = b def __call__( self, a: int = 0, c: int = 0, return_tensors: str | None = None, ) -> dict[str, int]: return dict(a=a, c=c) @pytest.mark.parametrize("model_id", ["Qwen/Qwen2-VL-2B-Instruct"]) # Dummy @pytest.mark.parametrize( ("config_kwargs", "inference_kwargs", "expected_kwargs"), [ ({"a": 1}, {}, {"a": 1, "b": 0}), ({}, {"a": 1}, {"a": 1, "b": 0}), # inference_kwargs should take precedence ({"a": 1}, {"a": 2}, {"a": 2, "b": 0}), # Should ignore extra kwargs ({"a": 1, "c": 1}, {}, {"a": 1, "b": 0}), ({"b": 1, "c": 1}, {}, {"a": 0, "b": 1}), ], ) def test_hf_processor_init_kwargs( model_id, config_kwargs, inference_kwargs, expected_kwargs, ): ctx = InputProcessingContext( model_config=ModelConfig(model_id, mm_processor_kwargs=config_kwargs), tokenizer=None, ) processor = ctx.get_hf_processor( DummyProcessor, # type: ignore[arg-type] **inference_kwargs, ) assert processor.a == expected_kwargs["a"] assert processor.b == expected_kwargs["b"] @pytest.mark.parametrize("model_id", ["Qwen/Qwen2-VL-2B-Instruct"]) # Dummy @pytest.mark.parametrize( ("config_kwargs", "inference_kwargs", "expected_kwargs"), [ ({"a": 1}, {}, {"a": 1, "c": 0}), ({}, {"a": 1}, {"a": 1, "c": 0}), # inference_kwargs should take precedence ({"a": 1}, {"a": 2}, {"a": 2, "c": 0}), # Should ignore extra kwargs ({"a": 1, "c": 1}, {}, {"a": 
1, "c": 1}), ({"b": 1, "c": 1}, {}, {"a": 0, "c": 1}), ], ) def test_hf_processor_call_kwargs( model_id, config_kwargs, inference_kwargs, expected_kwargs, ): ctx = InputProcessingContext( model_config=ModelConfig(model_id, mm_processor_kwargs=config_kwargs), tokenizer=None, ) processor = ctx.get_hf_processor(DummyProcessor) # type: ignore[arg-type] result = ctx.call_hf_processor(processor, {}, inference_kwargs) assert result == expected_kwargs def test_apply_matches_no_match_exits_quickly(): """
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
true
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/transformers_utils/test_processor.py
tests/transformers_utils/test_processor.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for ``get_processor_kwargs_from_processor``, which resolves the
full union of keyword arguments a HF processor accepts.

Two discovery paths are covered:
1. The processor's ``__call__`` annotates ``**kwargs`` with
   ``Unpack[SomeProcessingKwargs]``.
2. No ``Unpack`` annotation; the helper falls back to scanning the
   processor's module for ``*ProcessingKwargs`` classes.
"""

import importlib

from transformers.processing_utils import ProcessingKwargs
from typing_extensions import Unpack

from vllm.transformers_utils.processor import (
    get_processor_kwargs_from_processor,
)


class _FakeProcessorKwargs(ProcessingKwargs, total=False):  # type: ignore
    pass


def _assert_has_all_expected(keys: set[str]) -> None:
    """Assert that *keys* contains representatives of every kwarg group."""
    # text
    for k in ("text_pair", "text_target", "text_pair_target"):
        assert k in keys
    # image
    for k in ("do_convert_rgb", "do_resize"):
        assert k in keys
    # video (frame-sampling kwargs; previously mislabeled as audio)
    for k in (
        "fps",
        "do_sample_frames",
        "input_data_format",
        "default_to_square",
    ):
        assert k in keys
    # audio
    for k in ("padding", "return_attention_mask"):
        assert k in keys


# Path 1: __call__ method has kwargs: Unpack[*ProcessingKwargs]
class _ProcWithUnpack:
    def __call__(self, *args, **kwargs: Unpack[_FakeProcessorKwargs]):  # type: ignore
        return None


def test_get_processor_kwargs_from_processor_unpack_path_returns_full_union():
    proc = _ProcWithUnpack()
    keys = get_processor_kwargs_from_processor(proc)
    _assert_has_all_expected(keys)


# ---- Path 2: No Unpack, fallback to scanning *ProcessingKwargs in module ----
class _ProcWithoutUnpack:
    def __call__(self, *args, **kwargs):
        return None


def test_get_processor_kwargs_from_processor_module_scan_returns_full_union():
    # ensure the module scanned by fallback is this test module
    module_name = _ProcWithoutUnpack.__module__
    mod = importlib.import_module(module_name)
    assert hasattr(mod, "_FakeProcessorKwargs")

    proc = _ProcWithoutUnpack()
    keys = get_processor_kwargs_from_processor(proc)
    _assert_has_all_expected(keys)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/transformers_utils/test_config_parser_registry.py
tests/transformers_utils/test_config_parser_registry.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the config-parser registration decorator."""

from pathlib import Path

import pytest
from transformers import PretrainedConfig

from vllm.transformers_utils.config import get_config_parser, register_config_parser
from vllm.transformers_utils.config_parser_base import ConfigParserBase


@register_config_parser("custom_config_parser")
class CustomConfigParser(ConfigParserBase):
    """A minimal parser used only to exercise the registry."""

    def parse(
        self,
        model: str | Path,
        trust_remote_code: bool,
        revision: str | None = None,
        code_revision: str | None = None,
        **kwargs,
    ) -> tuple[dict, PretrainedConfig]:
        # Never called in these tests; registration alone is under test.
        raise NotImplementedError


def test_register_config_parser():
    """A registered name resolves back to the registered parser class."""
    parser = get_config_parser("custom_config_parser")
    assert isinstance(parser, CustomConfigParser)


def test_invalid_config_parser():
    """Registering a class that is not a ConfigParserBase must fail."""
    with pytest.raises(ValueError):

        @register_config_parser("invalid_config_parser")
        class InvalidConfigParser:
            pass
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/transformers_utils/test_config.py
tests/transformers_utils/test_config.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This test file includes some cases where it is inappropriate to only get
the `eos_token_id` from the tokenizer as defined by
`vllm.LLMEngine._get_eos_token_id`.
"""

from vllm.tokenizers import get_tokenizer
from vllm.transformers_utils.config import try_get_generation_config


def test_get_llama3_eos_token():
    model_name = "meta-llama/Llama-3.2-1B-Instruct"

    # The tokenizer alone reports only one of the stop tokens...
    tok = get_tokenizer(model_name)
    assert tok.eos_token_id == 128009

    # ...while the generation config carries the full list.
    gen_config = try_get_generation_config(model_name, trust_remote_code=False)
    assert gen_config is not None
    assert gen_config.eos_token_id == [128001, 128008, 128009]


def test_get_blip2_eos_token():
    model_name = "Salesforce/blip2-opt-2.7b"

    # Here the tokenizer and generation config disagree outright.
    tok = get_tokenizer(model_name)
    assert tok.eos_token_id == 2

    gen_config = try_get_generation_config(model_name, trust_remote_code=False)
    assert gen_config is not None
    assert gen_config.eos_token_id == 50118
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/transformers_utils/test_utils.py
tests/transformers_utils/test_utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for cloud-path scheme helpers and GGUF model-reference utilities."""

from pathlib import Path
from unittest.mock import patch

import pytest

from vllm.transformers_utils.gguf_utils import (
    is_gguf,
    is_remote_gguf,
    split_remote_gguf,
)
from vllm.transformers_utils.utils import (
    is_cloud_storage,
    is_gcs,
    is_s3,
)


def test_is_gcs():
    assert is_gcs("gs://model-path")
    for path in (
        "s3://model-path/path-to-model",
        "/unix/local/path",
        "nfs://nfs-fqdn.local",
    ):
        assert not is_gcs(path)


def test_is_s3():
    assert is_s3("s3://model-path/path-to-model")
    for path in (
        "gs://model-path",
        "/unix/local/path",
        "nfs://nfs-fqdn.local",
    ):
        assert not is_s3(path)


def test_is_cloud_storage():
    assert is_cloud_storage("gs://model-path")
    assert is_cloud_storage("s3://model-path/path-to-model")
    for path in ("/unix/local/path", "nfs://nfs-fqdn.local"):
        assert not is_cloud_storage(path)


class TestIsRemoteGGUF:
    """Behavior of is_remote_gguf on ``repo_id:quant_type`` references."""

    def test_is_remote_gguf_with_colon_and_slash(self):
        """Recognized quant types pass; unknown quant types are rejected."""
        for ref in (
            "unsloth/Qwen3-0.6B-GGUF:IQ1_S",
            "user/repo:Q2_K",
            "repo/model:Q4_K",
            "repo/model:Q8_0",
        ):
            assert is_remote_gguf(ref)
        for ref in (
            "repo/model:quant",
            "repo/model:INVALID",
            "repo/model:invalid_type",
        ):
            assert not is_remote_gguf(ref)

    def test_is_remote_gguf_without_colon(self):
        """A bare repo id (no quant suffix) is not a remote GGUF ref."""
        assert not is_remote_gguf("repo/model")
        assert not is_remote_gguf("unsloth/Qwen3-0.6B-GGUF")

    def test_is_remote_gguf_without_slash(self):
        """Without a slash there is no repo id, even for valid quant types."""
        assert not is_remote_gguf("model.gguf")
        assert not is_remote_gguf("model:IQ1_S")
        assert not is_remote_gguf("model:quant")

    def test_is_remote_gguf_local_path(self):
        """Local filesystem paths are never remote GGUF refs."""
        assert not is_remote_gguf("/path/to/model.gguf")
        assert not is_remote_gguf("./model.gguf")

    def test_is_remote_gguf_with_path_object(self):
        """Path objects are accepted in addition to strings."""
        assert is_remote_gguf(Path("unsloth/Qwen3-0.6B-GGUF:IQ1_S"))
        assert not is_remote_gguf(Path("repo/model"))

    def test_is_remote_gguf_with_http_https(self):
        """HTTP(S) URLs are rejected even when the quant type is valid."""
        for url in (
            "http://example.com/repo/model:IQ1_S",
            "https://huggingface.co/repo/model:Q2_K",
            "http://repo/model:Q4_K",
            "https://repo/model:Q8_0",
        ):
            assert not is_remote_gguf(url)

    def test_is_remote_gguf_with_cloud_storage(self):
        """Cloud-storage paths are rejected even with a valid quant type."""
        for url in (
            "s3://bucket/repo/model:IQ1_S",
            "gs://bucket/repo/model:Q2_K",
            "s3://repo/model:Q4_K",
            "gs://repo/model:Q8_0",
        ):
            assert not is_remote_gguf(url)


class TestSplitRemoteGGUF:
    """Behavior of split_remote_gguf."""

    def test_split_remote_gguf_valid(self):
        """Valid refs split into (repo_id, quant_type)."""
        assert split_remote_gguf("unsloth/Qwen3-0.6B-GGUF:IQ1_S") == (
            "unsloth/Qwen3-0.6B-GGUF",
            "IQ1_S",
        )
        assert split_remote_gguf("repo/model:Q2_K") == ("repo/model", "Q2_K")

    def test_split_remote_gguf_with_path_object(self):
        """Path objects split the same way as strings."""
        repo_id, quant_type = split_remote_gguf(Path("unsloth/Qwen3-0.6B-GGUF:IQ1_S"))
        assert repo_id == "unsloth/Qwen3-0.6B-GGUF"
        assert quant_type == "IQ1_S"

    def test_split_remote_gguf_invalid(self):
        """Anything is_remote_gguf rejects raises ValueError on split."""
        for bad_ref in (
            "repo/model",  # no colon
            "repo/model:INVALID_TYPE",  # unknown quant type
            "http://repo/model:IQ1_S",  # HTTP URL
            "s3://bucket/repo/model:Q2_K",  # cloud storage
        ):
            with pytest.raises(ValueError, match="Wrong GGUF model"):
                split_remote_gguf(bad_ref)


class TestIsGGUF:
    """Behavior of is_gguf (local files and remote refs combined)."""

    @patch("vllm.transformers_utils.gguf_utils.check_gguf_file", return_value=True)
    def test_is_gguf_with_local_file(self, mock_check_gguf):
        """A path check_gguf_file accepts counts as GGUF."""
        assert is_gguf("/path/to/model.gguf")
        assert is_gguf("./model.gguf")

    def test_is_gguf_with_remote_gguf(self):
        """Remote refs with valid quant types count as GGUF."""
        assert is_gguf("unsloth/Qwen3-0.6B-GGUF:IQ1_S")
        assert is_gguf("repo/model:Q2_K")
        assert is_gguf("repo/model:Q4_K")
        # Invalid quant types do not.
        assert not is_gguf("repo/model:quant")
        assert not is_gguf("repo/model:INVALID")

    @patch("vllm.transformers_utils.gguf_utils.check_gguf_file", return_value=False)
    def test_is_gguf_false(self, mock_check_gguf):
        """Plain model ids are not GGUF when the local check also fails."""
        for ref in ("unsloth/Qwen3-0.6B", "repo/model", "model"):
            assert not is_gguf(ref)

    def test_is_gguf_edge_cases(self):
        """Empty strings, partial refs, URLs, and cloud paths are rejected."""
        assert not is_gguf("")
        assert not is_gguf("model:IQ1_S")  # colon but no slash
        assert not is_gguf("repo/model")  # slash but no colon
        for url in (
            "http://repo/model:IQ1_S",
            "https://repo/model:Q2_K",
            "s3://bucket/repo/model:IQ1_S",
            "gs://bucket/repo/model:Q2_K",
        ):
            assert not is_gguf(url)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/transformers_utils/test_repo_utils.py
tests/transformers_utils/test_repo_utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for list_filtered_repo_files pattern filtering."""

import tempfile
from pathlib import Path
from unittest.mock import MagicMock, call, patch

import pytest

from vllm.transformers_utils.repo_utils import list_filtered_repo_files


@pytest.mark.parametrize(
    "allow_patterns,expected_relative_files",
    [
        (
            ["*.json", "correct*.txt"],
            ["json_file.json", "subfolder/correct.txt", "correct_2.txt"],
        ),
    ],
)
def test_list_filtered_repo_files(
    allow_patterns: list[str], expected_relative_files: list[str]
):
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Build a small tree with files that should and should not match.
        root = Path(tmp_dir)
        sub = root / "subfolder"
        sub.mkdir()
        for rel_name in (
            "json_file.json",
            "correct_2.txt",
            "uncorrect.txt",
            "uncorrect.jpeg",
        ):
            (root / rel_name).touch()
        (sub / "correct.txt").touch()
        (sub / "uncorrect_sub.txt").touch()

        def _glob_path() -> list[str]:
            # Mimic the repo listing: every file, relative to the repo root.
            return [
                str(file.relative_to(root))
                for file in root.glob("**/*")
                if file.is_file()
            ]

        # Replace the hub call so the function sees our local tree instead.
        with patch(
            "vllm.transformers_utils.repo_utils.list_repo_files",
            MagicMock(return_value=_glob_path()),
        ) as mock_list_repo_files:
            result = sorted(
                list_filtered_repo_files(
                    tmp_dir, allow_patterns, "revision", "model", "token"
                )
            )

        assert result == sorted(expected_relative_files)
        # The underlying listing must be fetched exactly once, with the
        # arguments forwarded as keywords.
        assert mock_list_repo_files.call_count == 1
        assert mock_list_repo_files.call_args_list[0] == call(
            repo_id=tmp_dir,
            revision="revision",
            repo_type="model",
            token="token",
        )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/transformers_utils/__init__.py
tests/transformers_utils/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/test_eagle_quantization.py
tests/model_executor/test_eagle_quantization.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Unit tests for draft-model (EAGLE) quantization config plumbing and
KV-cache scale name handling, driven entirely by mocks."""

from unittest.mock import Mock, patch

import pytest
import torch

from vllm.config import LoadConfig, ModelConfig, SpeculativeConfig, VllmConfig
from vllm.model_executor.models.utils import get_draft_quant_config
from vllm.platforms import current_platform

# Use up to two CUDA devices when available, otherwise fall back to CPU.
DEVICES = (
    [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]
    if current_platform.is_cuda_alike()
    else ["cpu"]
)


def test_get_draft_quant_config_with_draft_model():
    """When a draft model config exists, get_draft_quant_config must delegate
    to VllmConfig.get_quantization_config with that draft config."""
    mock_draft_model_config = Mock(spec=ModelConfig)
    mock_load_config = Mock(spec=LoadConfig)
    mock_speculative_config = Mock(spec=SpeculativeConfig)
    mock_speculative_config.draft_model_config = mock_draft_model_config
    mock_vllm_config = Mock(spec=VllmConfig)
    mock_vllm_config.speculative_config = mock_speculative_config
    mock_vllm_config.load_config = mock_load_config
    mock_quant_config = Mock()
    with patch.object(
        VllmConfig, "get_quantization_config", return_value=mock_quant_config
    ):
        result = get_draft_quant_config(mock_vllm_config)
        # Verify the function calls get_quantization_config with draft model config
        VllmConfig.get_quantization_config.assert_called_once_with(
            mock_draft_model_config, mock_load_config
        )
        assert result == mock_quant_config


def test_get_draft_quant_config_without_draft_model():
    """Without a draft model config, get_draft_quant_config returns None."""
    mock_speculative_config = Mock(spec=SpeculativeConfig)
    mock_speculative_config.draft_model_config = None
    mock_vllm_config = Mock(spec=VllmConfig)
    mock_vllm_config.speculative_config = mock_speculative_config
    mock_vllm_config.load_config = Mock(spec=LoadConfig)
    result = get_draft_quant_config(mock_vllm_config)
    assert result is None


@torch.inference_mode()
@pytest.mark.parametrize("device", DEVICES)
def test_fc_layer_quant_config_usage(dist_init, device) -> None:
    """ReplicatedLinear must carry the quant_config it was constructed with
    (or None) and still produce correctly-shaped forward output."""
    import torch

    from vllm.model_executor.layers.linear import ReplicatedLinear

    if current_platform.is_cuda_alike():
        torch.cuda.set_device(device)
    torch.set_default_device(device)

    input_size = 256
    output_size = 128

    # Layer built without quantization: quant_config stays None.
    fc_no_quant = ReplicatedLinear(
        input_size=input_size,
        output_size=output_size,
        bias=False,
        params_dtype=torch.float16,
        quant_config=None,
        prefix="fc",
    )
    assert fc_no_quant.quant_config is None
    assert fc_no_quant.input_size == input_size
    assert fc_no_quant.output_size == output_size

    # Layer built with a (mocked) quant config: the object is stored as-is.
    mock_quant_config = Mock()
    fc_with_quant = ReplicatedLinear(
        input_size=input_size,
        output_size=output_size,
        bias=False,
        params_dtype=torch.float16,
        quant_config=mock_quant_config,
        prefix="fc",
    )
    assert fc_with_quant.quant_config == mock_quant_config

    # Check forward pass
    x = torch.randn(2, input_size, dtype=torch.float16)
    output, _ = fc_no_quant(x)
    assert output.shape == (2, output_size)


def test_kv_cache_scale_name_handling():
    """get_cache_scale should map a k_proj weight name to its kv_scale name."""
    # Mock a quant config that supports cache scales
    mock_quant_config = Mock()
    mock_quant_config.get_cache_scale = Mock(return_value="layers.0.self_attn.kv_scale")
    # Condition check in load_weights
    name = "layers.0.self_attn.k_proj.weight"
    scale_name = mock_quant_config.get_cache_scale(name)
    # Check if get_cache_scale is called and returns expected value
    mock_quant_config.get_cache_scale.assert_called_once_with(name)
    assert scale_name == "layers.0.self_attn.kv_scale"


def test_kv_cache_scale_name_no_scale():
    """Weights unrelated to attention KV caches should yield no scale name."""
    # Mock a quant config that returns None for get_cache_scale
    mock_quant_config = Mock()
    mock_quant_config.get_cache_scale = Mock(return_value=None)
    name = "layers.0.mlp.gate_proj.weight"
    scale_name = mock_quant_config.get_cache_scale(name)
    # Should return None for weights that don't have cache scales
    assert scale_name is None


def test_maybe_remap_kv_scale_name():
    """Smoke-test maybe_remap_kv_scale_name against a small params dict.

    NOTE(review): the final assertion accepts any of the three possible
    outcomes (remapped key, unchanged name, or None), so it cannot fail for
    any return value of maybe_remap_kv_scale_name — consider tightening it
    to the single expected outcome.
    """
    from vllm.model_executor.model_loader.weight_utils import maybe_remap_kv_scale_name

    params_dict = {
        "layers.0.self_attn.kv_scale": Mock(),
        "layers.1.self_attn.kv_scale": Mock(),
    }
    name = "layers.0.self_attn.some_scale"
    remapped = maybe_remap_kv_scale_name(name, params_dict)
    assert remapped in params_dict or remapped == name or remapped is None


def test_load_weights_kv_scale_handling():
    """Re-enact the load_weights KV-scale branch: a k_proj weight name is
    remapped to the kv_scale param and a scalar weight value is selected."""
    kv_scale_param = Mock()
    kv_scale_param.weight_loader = Mock()
    params_dict = {
        "layers.0.self_attn.kv_scale": kv_scale_param,
    }
    mock_quant_config = Mock()
    mock_quant_config.get_cache_scale = Mock(return_value="layers.0.self_attn.kv_scale")
    # Load_weights logic for KV cache scales
    name = "layers.0.self_attn.k_proj.weight"
    loaded_weight_tensor = torch.tensor([1.0, 2.0])
    if mock_quant_config is not None:
        scale_name = mock_quant_config.get_cache_scale(name)
        if scale_name:
            param = params_dict[scale_name]
            assert param is kv_scale_param
            # 0-d tensors are used as-is; otherwise take the first element.
            weight_to_load = (
                loaded_weight_tensor
                if loaded_weight_tensor.dim() == 0
                else loaded_weight_tensor[0]
            )
            assert scale_name == "layers.0.self_attn.kv_scale"
            assert weight_to_load == loaded_weight_tensor[0]
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/test_enabled_custom_ops.py
tests/model_executor/test_enabled_custom_ops.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for CustomOp enable/disable resolution and ROCm AITER dispatch."""

import pytest
import torch

from vllm._aiter_ops import rocm_aiter_ops
from vllm.config import (
    CompilationConfig,
    VllmConfig,
    get_cached_compilation_config,
    set_current_vllm_config,
)
from vllm.model_executor.custom_op import CustomOp
from vllm.model_executor.layers.activation import (
    GeluAndMul,
    ReLUSquaredActivation,
    SiluAndMul,
)
from vllm.model_executor.layers.fused_moe.fused_moe import (
    dispatch_topk_func,
    vllm_topk_softmax,
)
from vllm.model_executor.layers.layernorm import (
    RMSNorm,
    dispatch_rocm_rmsnorm_func,
    fused_add_rms_norm,
    rms_norm,
)
from vllm.platforms import current_platform

# dtypes for which the AITER RMSNorm kernels are used (see dispatch test).
RMS_NORM_SUPPORTED_DTYPES = [torch.float16, torch.bfloat16]


# Registered subclass for test
@CustomOp.register("relu3")
class Relu3(ReLUSquaredActivation):
    pass


@pytest.mark.parametrize(
    "env, compilation_mode, backend, ops_enabled, default_on",
    [
        # Default values based on compile level
        # - All by default (no Inductor compilation)
        (None, 0, "eager", [True] * 4, True),
        (None, 1, "eager", [True] * 4, True),
        (None, 2, "eager", [True] * 4, True),
        (None, 3, "eager", [True] * 4, True),
        # - All by default (mode 0 does not compile, even with the
        #   inductor backend selected)
        (None, 0, "inductor", [True] * 4, True),
        # - None by default (with Inductor)
        (None, 1, "inductor", [False] * 4, False),
        (None, 2, "inductor", [False] * 4, False),
        (None, 3, "inductor", [False] * 4, False),
        # Explicitly enabling/disabling
        #
        # Default: all
        #
        # All but SiluAndMul
        ("+rms_norm,-silu_and_mul", 0, "inductor", [1, 0, 1, 1], True),
        # Only ReLU3
        ("none,-rms_norm,+relu3", 1, "eager", [0, 0, 0, 1], False),
        # All but SiluAndMul
        ("all,-silu_and_mul", 2, "inductor", [1, 0, 1, 1], True),
        # All but ReLU3 (even if ReLU2 is on)
        ("-relu3,+relu2", 3, "eager", [1, 1, 1, 0], True),
        # RMSNorm and SiluAndMul
        ("none,-relu3,+rms_norm,+silu_and_mul", 3, "eager", [1, 1, 0, 0], False),
        # All but RMSNorm
        ("-rms_norm", 3, "eager", [0, 1, 1, 1], True),
        #
        # Default: none
        #
        # Only ReLU3
        ("none,+relu3", 3, "inductor", [0, 0, 0, 1], False),
        # All but RMSNorm
        ("all,-rms_norm", 3, "inductor", [0, 1, 1, 1], True),
    ],
)
def test_enabled_ops(
    env: str | None,
    compilation_mode: int,
    backend: str,
    ops_enabled: list[int],
    default_on: bool,
):
    """Check CustomOp.default_on() and per-op enabled() for a matrix of
    custom_ops overrides, compilation modes, and backends.

    ops_enabled holds the expected flags for, in order:
    rms_norm, silu_and_mul, gelu_and_mul, relu3.
    """
    custom_ops = env.split(",") if env else []
    vllm_config = VllmConfig(
        compilation_config=CompilationConfig(
            backend=backend, mode=compilation_mode, custom_ops=custom_ops
        )
    )
    # Drop any cached compilation config so this config takes effect.
    get_cached_compilation_config.cache_clear()
    with set_current_vllm_config(vllm_config):
        assert CustomOp.default_on() == default_on

        ops_enabled = [bool(x) for x in ops_enabled]

        assert RMSNorm(1024).enabled() == ops_enabled[0]
        assert CustomOp.op_registry["rms_norm"].enabled() == ops_enabled[0]

        assert SiluAndMul().enabled() == ops_enabled[1]
        assert CustomOp.op_registry["silu_and_mul"].enabled() == ops_enabled[1]

        assert GeluAndMul().enabled() == ops_enabled[2]
        assert CustomOp.op_registry["gelu_and_mul"].enabled() == ops_enabled[2]

        # If registered, subclasses should follow their own name
        assert Relu3().enabled() == ops_enabled[3]
        assert CustomOp.op_registry["relu3"].enabled() == ops_enabled[3]

        # Unregistered subclass
        class SiluAndMul2(SiluAndMul):
            pass

        # Subclasses should not require registration
        assert SiluAndMul2().enabled() == SiluAndMul().enabled()


@pytest.mark.parametrize(
    "env", ["all,none", "all,+rms_norm,all", "+rms_norm,-rms_norm"]
)
def test_enabled_ops_invalid(env: str):
    """Contradictory custom_ops specs must raise when resolved."""
    with pytest.raises(Exception):  # noqa
        vllm_config = VllmConfig(
            compilation_config=CompilationConfig(custom_ops=env.split(","))
        )
        with set_current_vllm_config(vllm_config):
            RMSNorm(1024).enabled()


@pytest.mark.parametrize(
    "use_rocm_aiter", [True, False] if current_platform.is_rocm() else [False]
)
def test_topk_dispatch(use_rocm_aiter: bool):
    """topk dispatch picks the AITER kernel only on ROCm with AITER enabled."""
    topk_func = dispatch_topk_func(use_rocm_aiter)
    if current_platform.is_rocm() and use_rocm_aiter:
        assert topk_func == rocm_aiter_ops.topk_softmax
    else:
        assert topk_func == vllm_topk_softmax


@pytest.mark.parametrize("add_residual", [True, False])
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("use_rocm_aiter", [True, False])
@pytest.mark.skipif(
    not current_platform.is_rocm(), reason="AITER is a feature exclusive for ROCm"
)
def test_rms_norm_dispatch(
    add_residual: bool, dtype: torch.dtype, use_rocm_aiter: bool
):
    """RMSNorm dispatch: AITER kernels only for fp16/bf16 on ROCm with AITER
    enabled; otherwise the reference implementations."""
    rms_norm_func = dispatch_rocm_rmsnorm_func(add_residual, dtype, use_rocm_aiter)
    should_use_rocm_aiter = (
        current_platform.is_rocm()
        and use_rocm_aiter
        and dtype in RMS_NORM_SUPPORTED_DTYPES
    )
    if add_residual and should_use_rocm_aiter:
        assert rms_norm_func == rocm_aiter_ops.rms_norm2d_with_add
    elif should_use_rocm_aiter:
        assert rms_norm_func == rocm_aiter_ops.rms_norm
    elif add_residual:
        assert rms_norm_func == fused_add_rms_norm
    else:
        assert rms_norm_func == rms_norm
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/test_qwen3_omni.py
tests/model_executor/test_qwen3_omni.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for Qwen3-Omni audio-in-video prompt update construction, using
mocked HF config/processor/tokenizer objects (no model download needed)."""

from unittest.mock import Mock

import pytest
from transformers import PretrainedConfig

from vllm.multimodal.processing import InputProcessingContext


# Helper function to print input IDs with coalesced audio/video tokens.
def print_input_ids(input_ids):
    """
    Print input IDs, compressing consecutive special tokens.
    - 151675: <|audio_pad|>
    - 151656: <|video_pad|>
    """
    if not input_ids:
        print("[]")
        return
    result = []
    i = 0
    while i < len(input_ids):
        current_id = input_ids[i]
        # Check if it's a special token that should be compressed
        if current_id in [151675, 151656]:
            # Count consecutive occurrences
            count = 1
            while i + count < len(input_ids) and input_ids[i + count] == current_id:
                count += 1
            # Add compressed representation
            token_name = "<|audio_pad|>" if current_id == 151675 else "<|video_pad|>"
            result.append(f"{token_name} * {count}")
            i += count
        else:
            # Regular token, just add it
            result.append(str(current_id))
            i += 1
    print(", ".join(result))


@pytest.fixture
def mock_qwen3_omni_config():
    """Create a mock Qwen3OmniMoeThinker config."""
    config = Mock(spec=PretrainedConfig)
    # Token IDs from https://huggingface.co/Qwen/Qwen3-Omni-30B-A3B-Instruct/blob/main/tokenizer_config.json
    config.audio_token_id = 151675  # <|audio_pad|>
    config.video_token_id = 151656  # <|video_pad|>
    config.image_token_id = 151655  # <|image_pad|>
    config.audio_start_token_id = 151669  # <|audio_start|>
    config.audio_end_token_id = 151670  # <|audio_end|>
    config.vision_start_token_id = 151652  # <|vision_start|>
    config.position_id_per_seconds = 12.5

    # Vision config
    vision_config = Mock()
    vision_config.spatial_merge_size = 2
    config.vision_config = vision_config

    return config


@pytest.fixture
def mock_processor():
    """Create a mock HF processor."""
    from transformers.models.whisper import WhisperFeatureExtractor

    processor = Mock()
    processor.audio_token = "<|audio_pad|>"
    processor.image_token = "<|image_pad|>"
    processor.video_token = "<|video_pad|>"
    # Create a real WhisperFeatureExtractor instance for the feature_extractor attribute
    feature_extractor = WhisperFeatureExtractor()
    processor.feature_extractor = feature_extractor
    return processor


@pytest.fixture
def mock_tokenizer():
    """Create a mock tokenizer."""
    tokenizer = Mock()
    # Token IDs from https://huggingface.co/Qwen/Qwen3-Omni-30B-A3B-Instruct/blob/main/tokenizer_config.json
    tokenizer.get_vocab = Mock(
        return_value={
            "<|audio_pad|>": 151675,
            "<|video_pad|>": 151656,
            "<|image_pad|>": 151655,
            "<|audio_start|>": 151669,
            "<|audio_end|>": 151670,
            "<|vision_start|>": 151652,
            "<|vision_end|>": 151653,
        }
    )
    tokenizer.encode = Mock(
        side_effect=lambda x: {
            "<|vision_start|>": [151652],
            "<|vision_end|>": [151653],
            "<|audio_start|>": [151669],
            "<|audio_end|>": [151670],
            "<|audio_pad|>": [151675],
            "<|image_pad|>": [151655],
            "<|video_pad|>": [151656],
        }.get(x, [0])
    )
    tokenizer.vision_bos_token = "<|vision_start|>"
    tokenizer.vision_eos_token = "<|vision_end|>"
    tokenizer.audio_bos_token = "<|audio_start|>"
    tokenizer.audio_eos_token = "<|audio_end|>"
    return tokenizer


@pytest.fixture
def mock_image_processor():
    """Create a mock image processor."""
    image_processor = Mock()
    image_processor.merge_size = 2
    return image_processor


def test_qwen3_omni_get_updates_use_audio_in_video(
    mock_qwen3_omni_config,
    mock_processor,
    mock_tokenizer,
    mock_image_processor,
):
    """Test the get_updates_use_audio_in_video method directly."""
    from vllm.model_executor.models.qwen3_omni_moe_thinker import (
        Qwen3OmniMoeThinkerMultiModalProcessor,
        Qwen3OmniMoeThinkerProcessingInfo,
    )

    # Create a mock context
    mock_ctx = Mock(spec=InputProcessingContext)

    # Create processing info, with its HF accessors stubbed out so no real
    # model assets are loaded.
    info = Qwen3OmniMoeThinkerProcessingInfo(mock_ctx)
    info.get_hf_config = Mock(return_value=mock_qwen3_omni_config)
    info.get_hf_processor = Mock(return_value=mock_processor)
    info.get_tokenizer = Mock(return_value=mock_tokenizer)
    info.get_image_processor = Mock(return_value=mock_image_processor)

    # Create a mock dummy_inputs builder
    mock_dummy_inputs = Mock()

    # Create the processor
    processor = Qwen3OmniMoeThinkerMultiModalProcessor(info, mock_dummy_inputs)

    # Test parameters from reference video
    # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-Omni/demo/draw.mp4
    audio_len = 85
    video_grid_thw = [6, 36, 64]
    video_second_per_grid_t = 2.0

    # Call the method
    updates = processor.get_updates_use_audio_in_video(
        thinker_config=mock_qwen3_omni_config,
        audio_len=audio_len,
        video_grid_thw=video_grid_thw,
        video_second_per_grid_t=video_second_per_grid_t,
    )

    # Updated input ids should align with HF implementation.
    # 151669,
    # <|video_pad|> * 576, <|audio_pad|> * 25,
    # <|video_pad|> * 576, <|audio_pad|> * 25,
    # <|video_pad|> * 576, <|audio_pad|> * 25,
    # <|video_pad|> * 576, <|audio_pad|> * 10,
    # <|video_pad|> * 1152,
    # 151670
    print_input_ids(updates)

    # Verify structure
    assert isinstance(updates, list)
    assert len(updates) > 0

    # Verify start and end tokens
    audio_start_token_id = mock_qwen3_omni_config.audio_start_token_id
    audio_end_token_id = mock_qwen3_omni_config.audio_end_token_id
    assert updates[0] == audio_start_token_id
    assert updates[-1] == audio_end_token_id

    # Verify both audio and video tokens are present
    audio_token_id = mock_qwen3_omni_config.audio_token_id
    video_token_id = mock_qwen3_omni_config.video_token_id
    audio_count = updates.count(audio_token_id)
    video_count = updates.count(video_token_id)

    assert audio_count == audio_len, (
        f"Expected {audio_len} audio tokens, got {audio_count}"
    )

    # Calculate expected video token count: each frame contributes
    # (H / merge) * (W / merge) tokens after spatial merging.
    spatial_merge_size = mock_qwen3_omni_config.vision_config.spatial_merge_size
    height = video_grid_thw[1] // spatial_merge_size
    width = video_grid_thw[2] // spatial_merge_size
    expected_video_count = video_grid_thw[0] * height * width

    assert video_count == expected_video_count, (
        f"Expected {expected_video_count} video tokens, got {video_count}"
    )

    # Total tokens should be: 1 (start) + audio_len + video_count + 1 (end)
    expected_total = 1 + audio_len + expected_video_count + 1
    assert len(updates) == expected_total, (
        f"Expected {expected_total} total tokens, got {len(updates)}"
    )


if __name__ == "__main__":
    pytest.main([__file__, "-v"])
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/test_model_load_with_params.py
tests/model_executor/test_model_load_with_params.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""End-to-end checks that encoder/pooling/tokenizer configs are honored when
loading BERT- and RoBERTa-family embedding models."""

import os

import pytest

from vllm.model_executor.layers.pooler import (
    CLSPool,
    DispatchPooler,
    MeanPool,
    PoolingType,
)
from vllm.model_executor.models.bert import BertEmbeddingModel
from vllm.model_executor.models.roberta import RobertaEmbeddingModel
from vllm.platforms import current_platform

MAX_MODEL_LEN = 128
MODEL_NAME = os.environ.get("MODEL_NAME", "BAAI/bge-base-en-v1.5")
REVISION = os.environ.get("REVISION", "main")

MODEL_NAME_ROBERTA = os.environ.get("MODEL_NAME", "intfloat/multilingual-e5-base")
REVISION_ROBERTA = os.environ.get("REVISION", "main")


@pytest.mark.skipif(
    current_platform.is_rocm(), reason="Xformers backend is not supported on ROCm."
)
def test_model_loading_with_params(vllm_runner, monkeypatch):
    """
    Test parameter weight loading for a BERT embedding model.

    NOTE(review): the original docstring claimed "tp>1", but no
    tensor_parallel_size is passed here, so this runs single-GPU.
    """
    # to use apply_model
    monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")

    with vllm_runner(
        model_name=MODEL_NAME,
        revision=REVISION,
        dtype="float16",
        max_model_len=MAX_MODEL_LEN,
    ) as vllm_model:
        output = vllm_model.embed(
            "Write a short story about a robot that dreams for the first time.\n"
        )

        model_config = vllm_model.llm.llm_engine.model_config
        model_tokenizer = vllm_model.llm.llm_engine.tokenizer

        # asserts on the bert model config file
        assert model_config.encoder_config["max_seq_length"] == 512
        assert model_config.encoder_config["do_lower_case"]

        # asserts on the pooling config files
        assert model_config.pooler_config.pooling_type == PoolingType.CLS.name
        assert model_config.pooler_config.normalize

        # asserts on the tokenizer loaded
        assert model_config.tokenizer == "BAAI/bge-base-en-v1.5"
        assert model_tokenizer.model_max_length == 512

        def check_model(model):
            assert isinstance(model, BertEmbeddingModel)
            assert isinstance(pooler := model.pooler, DispatchPooler)
            assert isinstance(pooler.poolers_by_task["embed"].pooling, CLSPool)

        vllm_model.apply_model(check_model)

        assert output


@pytest.mark.skipif(
    current_platform.is_rocm(), reason="Xformers backend is not supported on ROCm."
)
def test_roberta_model_loading_with_params(vllm_runner, monkeypatch):
    """
    Test parameter weight loading for a RoBERTa embedding model.

    NOTE(review): the original docstring claimed "tp>1", but no
    tensor_parallel_size is passed here, so this runs single-GPU.
    """
    # to use apply_model
    monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")

    with vllm_runner(
        model_name=MODEL_NAME_ROBERTA,
        revision=REVISION_ROBERTA,
        dtype="float16",
        max_model_len=MAX_MODEL_LEN,
    ) as vllm_model:
        output = vllm_model.embed(
            "Write a short story about a robot that dreams for the first time.\n"
        )

        model_config = vllm_model.llm.llm_engine.model_config
        model_tokenizer = vllm_model.llm.llm_engine.tokenizer

        # asserts on the roberta model config file
        assert model_config.encoder_config["max_seq_length"] == 512
        assert not model_config.encoder_config["do_lower_case"]

        # asserts on the pooling config files
        assert model_config.pooler_config.pooling_type == PoolingType.MEAN.name
        assert model_config.pooler_config.normalize

        # asserts on the tokenizer loaded
        assert model_config.tokenizer == "intfloat/multilingual-e5-base"
        assert model_tokenizer.model_max_length == 512

        def check_model(model):
            assert isinstance(model, RobertaEmbeddingModel)
            assert isinstance(pooler := model.pooler, DispatchPooler)
            assert isinstance(pooler.poolers_by_task["embed"].pooling, MeanPool)

        vllm_model.apply_model(check_model)

        assert output


@pytest.mark.skipif(
    current_platform.is_rocm(), reason="Xformers backend is not supported on ROCm."
)
def test_facebook_roberta_model_loading_with_params(vllm_runner, monkeypatch):
    """
    Test loading roberta-base model with no lm_head.
    """
    # to use apply_model
    monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")

    model_name = "FacebookAI/roberta-base"
    with vllm_runner(
        model_name=model_name, dtype="float16", max_model_len=MAX_MODEL_LEN
    ) as vllm_model:
        output = vllm_model.embed(
            "Write a short story about a robot that dreams for the first time.\n"
        )

        assert vllm_model.llm.llm_engine.model_config.tokenizer == model_name

        def check_model(model):
            assert isinstance(model, RobertaEmbeddingModel)
            # roberta-base ships no LM head; the embedding model must not
            # have materialized one.
            assert not hasattr(model, "lm_head")
            assert isinstance(pooler := model.pooler, DispatchPooler)
            assert isinstance(pooler.poolers_by_task["embed"].pooling, CLSPool)

        vllm_model.apply_model(check_model)

        assert output
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/__init__.py
tests/model_executor/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/test_weight_utils.py
tests/model_executor/test_weight_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os import tempfile import huggingface_hub.constants import pytest from huggingface_hub.utils import LocalEntryNotFoundError from vllm.model_executor.model_loader.weight_utils import ( download_weights_from_hf, enable_hf_transfer, ) def test_hf_transfer_auto_activation(): if "HF_HUB_ENABLE_HF_TRANSFER" in os.environ: # in case it is already set, we can't test the auto activation pytest.skip("HF_HUB_ENABLE_HF_TRANSFER is set, can't test auto activation") enable_hf_transfer() try: # enable hf hub transfer if available import hf_transfer # type: ignore # noqa HF_TRANSFER_ACTIVE = True except ImportError: HF_TRANSFER_ACTIVE = False assert huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER == HF_TRANSFER_ACTIVE def test_download_weights_from_hf(): with tempfile.TemporaryDirectory() as tmpdir: # assert LocalEntryNotFoundError error is thrown # if offline is set and model is not cached huggingface_hub.constants.HF_HUB_OFFLINE = True with pytest.raises(LocalEntryNotFoundError): download_weights_from_hf( "facebook/opt-125m", allow_patterns=["*.safetensors", "*.bin"], cache_dir=tmpdir, ) # download the model huggingface_hub.constants.HF_HUB_OFFLINE = False download_weights_from_hf( "facebook/opt-125m", allow_patterns=["*.safetensors", "*.bin"], cache_dir=tmpdir, ) # now it should work offline huggingface_hub.constants.HF_HUB_OFFLINE = True assert ( download_weights_from_hf( "facebook/opt-125m", allow_patterns=["*.safetensors", "*.bin"], cache_dir=tmpdir, ) is not None ) if __name__ == "__main__": test_hf_transfer_auto_activation() test_download_weights_from_hf()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/model_loader/test_registry.py
tests/model_executor/model_loader/test_registry.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from torch import nn from vllm.config import ModelConfig from vllm.config.load import LoadConfig from vllm.model_executor.model_loader import get_model_loader, register_model_loader from vllm.model_executor.model_loader.base_loader import BaseModelLoader @register_model_loader("custom_load_format") class CustomModelLoader(BaseModelLoader): def __init__(self, load_config: LoadConfig) -> None: super().__init__(load_config) def download_model(self, model_config: ModelConfig) -> None: pass def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None: pass def test_register_model_loader(): load_config = LoadConfig(load_format="custom_load_format") assert isinstance(get_model_loader(load_config), CustomModelLoader) def test_invalid_model_loader(): with pytest.raises(ValueError): @register_model_loader("invalid_load_format") class InValidModelLoader: pass
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/model_loader/__init__.py
tests/model_executor/model_loader/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/model_loader/test_sharded_state_loader.py
tests/model_executor/model_loader/test_sharded_state_loader.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for ShardedStateLoader: subtensor filtering and a round-trip
save/load equivalence check run in isolated subprocesses."""

import fnmatch
import multiprocessing as mp
import os
import shutil
from tempfile import TemporaryDirectory

import pytest
import torch
from huggingface_hub import snapshot_download

from vllm import LLM, SamplingParams
from vllm.model_executor.model_loader import ShardedStateLoader

prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]

# Create a sampling params object.
sampling_params = SamplingParams(
    temperature=0,
    max_tokens=256,
    ignore_eos=True,
)


def test_filter_subtensors():
    """_filter_subtensors must drop entries that alias views of other
    tensors ("x", "y", "z") and keep the owning tensors untouched."""
    state_dict = {
        "a": torch.empty(2),
        "b": torch.empty((2, 4)),
        "c": torch.empty((2, 4, 8)),
    }
    state_dict.update(
        {
            "x": state_dict["b"],
            "y": state_dict["c"][1, 2, :],
            "z": state_dict["c"][1, :, 4],
        }
    )
    filtered_state_dict = ShardedStateLoader._filter_subtensors(state_dict)
    assert tuple(filtered_state_dict.keys()) == ("a", "b", "c")
    for key, tensor in filtered_state_dict.items():
        # NOTE: don't use `equal` here, as the tensor might contain NaNs
        assert tensor is state_dict[key]


@pytest.fixture(scope="module")
def llama_3p2_1b_files():
    # Download safetensors only; .bin and original/ checkpoints are skipped.
    input_dir = snapshot_download(
        "meta-llama/Llama-3.2-1B-Instruct", ignore_patterns=["*.bin*", "original/*"]
    )
    yield input_dir


def _run_writer(input_dir, output_dir, weights_patterns, **kwargs):
    # Subprocess entry point: load the model, dump sharded state, and copy
    # over the non-weight metadata files so output_dir is a loadable model.
    llm_sharded_writer = LLM(model=input_dir, **kwargs)
    # Dump worker states to output directory
    llm_sharded_writer.llm_engine.engine_core.save_sharded_state(path=output_dir)
    # Copy metadata files to output directory
    for file in os.listdir(input_dir):
        if os.path.isdir(os.path.join(input_dir, file)):
            shutil.copytree(
                os.path.join(input_dir, file), os.path.join(output_dir, file)
            )
        elif not any(fnmatch.fnmatch(file, ext) for ext in weights_patterns):
            shutil.copy(os.path.join(input_dir, file), output_dir)


def _run_generate(input_dir, queue: mp.Queue, **kwargs):
    # Subprocess entry point: generate for the shared prompts and ship the
    # first-candidate outputs back to the parent via the queue.
    llm = LLM(model=input_dir, **kwargs)
    gen = llm.generate(prompts, sampling_params)
    queue.put([g.outputs[0].__dict__ for g in gen])
    queue.close()
    queue.join_thread()


@pytest.mark.parametrize("enable_lora", [False, True])
@pytest.mark.parametrize("tp_size", [1, 2])
def test_sharded_state_loader(
    enable_lora, tp_size, num_gpus_available, llama_3p2_1b_files
):
    """Generation from a sharded-state re-export must match generation from
    the original checkpoint exactly (greedy sampling)."""
    if num_gpus_available < tp_size:
        pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}")

    weights_patterns = ("*.safetensors",)
    gpu_memory_utilization = 0.8
    input_dir = llama_3p2_1b_files
    ctx = mp.get_context("spawn")

    # Run in separate processes for memory & CUDA isolation
    with TemporaryDirectory() as output_dir:
        # Step 1: write the sharded state from the original checkpoint.
        p = ctx.Process(
            target=_run_writer,
            args=(input_dir, output_dir, weights_patterns),
            kwargs=dict(
                tensor_parallel_size=tp_size,
                gpu_memory_utilization=gpu_memory_utilization,
                enforce_eager=True,
            ),
        )
        p.start()
        p.join()

        # Step 2: baseline generation from the original checkpoint.
        queue = ctx.Queue()

        p = ctx.Process(
            target=_run_generate,
            args=(input_dir, queue),
            kwargs=dict(
                enable_lora=enable_lora,
                gpu_memory_utilization=gpu_memory_utilization,
                tensor_parallel_size=tp_size,
            ),
        )
        p.start()
        # Call queue.get() before p.join() to prevent deadlock:
        # If p.join() is called before queue.get() and the queue is full,
        # the child process may block while writing to the queue and never
        # terminate, causing the parent to wait indefinitely on p.join().
        # See: https://github.com/vllm-project/vllm/pull/22371#discussion_r2257773814
        out_before = queue.get()
        p.join()
        queue.close()
        queue.join_thread()

        # Step 3: generation from the sharded-state export.
        queue = ctx.Queue()

        p = ctx.Process(
            target=_run_generate,
            args=(output_dir, queue),
            kwargs=dict(
                enable_lora=enable_lora,
                gpu_memory_utilization=gpu_memory_utilization,
                tensor_parallel_size=tp_size,
                load_format="sharded_state",
            ),
        )
        p.start()
        # Call queue.get() before p.join() to prevent deadlock:
        # If p.join() is called before queue.get() and the queue is full,
        # the child process may block while writing to the queue and never
        # terminate, causing the parent to wait indefinitely on p.join().
        # See: https://github.com/vllm-project/vllm/pull/22371#discussion_r2257773814
        out_after = queue.get()
        p.join()
        queue.close()
        queue.join_thread()

        assert out_before == out_after
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/model_loader/fastsafetensors_loader/test_fastsafetensors_loader.py
tests/model_executor/model_loader/fastsafetensors_loader/test_fastsafetensors_loader.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest from vllm import SamplingParams from vllm.platforms import current_platform test_model = "openai-community/gpt2" prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create a sampling params object. sampling_params = SamplingParams(temperature=0.8, top_p=0.95, seed=0) @pytest.mark.skipif( not current_platform.is_cuda_alike(), reason="fastsafetensors requires NVIDIA/AMD GPUs", ) def test_model_loader_download_files(vllm_runner): with vllm_runner(test_model, load_format="fastsafetensors") as llm: deserialized_outputs = llm.generate(prompts, sampling_params) assert deserialized_outputs
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/model_loader/fastsafetensors_loader/__init__.py
tests/model_executor/model_loader/fastsafetensors_loader/__init__.py
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/model_loader/fastsafetensors_loader/test_weight_utils.py
tests/model_executor/model_loader/fastsafetensors_loader/test_weight_utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import glob import tempfile import huggingface_hub.constants import pytest import torch from vllm.model_executor.model_loader.weight_utils import ( download_weights_from_hf, fastsafetensors_weights_iterator, safetensors_weights_iterator, ) from vllm.platforms import current_platform @pytest.mark.skipif( not current_platform.is_cuda_alike(), reason="fastsafetensors requires NVIDIA/AMD GPUs", ) def test_fastsafetensors_model_loader(): with tempfile.TemporaryDirectory() as tmpdir: huggingface_hub.constants.HF_HUB_OFFLINE = False download_weights_from_hf( "openai-community/gpt2", allow_patterns=["*.safetensors"], cache_dir=tmpdir ) safetensors = glob.glob(f"{tmpdir}/**/*.safetensors", recursive=True) assert len(safetensors) > 0 fastsafetensors_tensors = {} hf_safetensors_tensors = {} for name, tensor in fastsafetensors_weights_iterator(safetensors, True): fastsafetensors_tensors[name] = tensor for name, tensor in safetensors_weights_iterator(safetensors, True): hf_safetensors_tensors[name] = tensor assert len(fastsafetensors_tensors) == len(hf_safetensors_tensors) for name, fastsafetensors_tensor in fastsafetensors_tensors.items(): fastsafetensors_tensor = fastsafetensors_tensor.to("cpu") assert fastsafetensors_tensor.dtype == hf_safetensors_tensors[name].dtype assert fastsafetensors_tensor.shape == hf_safetensors_tensors[name].shape assert torch.all(fastsafetensors_tensor.eq(hf_safetensors_tensors[name])) if __name__ == "__main__": test_fastsafetensors_model_loader()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/tests/model_executor/model_loader/tensorizer_loader/conftest.py
tests/model_executor/model_loader/tensorizer_loader/conftest.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Shared fixtures and helpers for the Tensorizer loader tests."""

from collections.abc import Callable

import pytest

from vllm import LLM, EngineArgs
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.model_executor.model_loader import tensorizer as tensorizer_mod
from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
from vllm.utils.network_utils import get_distributed_init_method, get_ip, get_open_port
from vllm.v1.executor import UniProcExecutor
from vllm.v1.worker.worker_base import WorkerWrapperBase

MODEL_REF = "facebook/opt-125m"


@pytest.fixture()
def model_ref():
    """Model id used throughout the tensorizer tests."""
    return MODEL_REF


@pytest.fixture(autouse=True)
def allow_insecure_serialization(monkeypatch):
    """Opt every test in to insecure serialization (required by tensorizer)."""
    monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")


@pytest.fixture(autouse=True)
def cleanup():
    """Tear down distributed state and free memory before each test."""
    cleanup_dist_env_and_memory(shutdown_ray=True)


@pytest.fixture()
def just_serialize_model_tensors(model_ref, monkeypatch, tmp_path):
    """Serialize only the model tensors to tmp_path.

    serialize_extra_artifacts is stubbed to a no-op so that only the
    `model.tensors` file is produced, with no auxiliary artifacts.
    """

    def noop(*args, **kwargs):
        return None

    args = EngineArgs(model=model_ref)
    tc = TensorizerConfig(tensorizer_uri=f"{tmp_path}/model.tensors")
    monkeypatch.setattr(tensorizer_mod, "serialize_extra_artifacts", noop)
    tensorizer_mod.tensorize_vllm_model(args, tc)
    yield tmp_path


@pytest.fixture(autouse=True)
def tensorizer_config():
    """Fresh default TensorizerConfig for each test."""
    config = TensorizerConfig(tensorizer_uri="vllm")
    return config


@pytest.fixture()
def model_path(model_ref, tmp_path):
    """Expected on-disk location of the serialized tensors for model_ref."""
    yield tmp_path / model_ref / "model.tensors"


def assert_from_collective_rpc(engine: LLM, closure: Callable, closure_kwargs: dict):
    """Run `closure` on every worker via collective_rpc; True iff all
    workers returned a truthy result."""
    res = engine.collective_rpc(method=closure, kwargs=closure_kwargs)
    return all(res)


# This is an object pulled from tests/v1/engine/test_engine_core.py
# Modified to strip the `load_model` method from its `_init_executor`
# method. It's purely used as a dummy utility to run methods that test
# Tensorizer functionality
class DummyExecutor(UniProcExecutor):
    def _init_executor(self) -> None:
        """Initialize the worker WITHOUT loading the model (model loading is
        deliberately stripped here — see the class-level note)."""
        self.driver_worker = WorkerWrapperBase(vllm_config=self.vllm_config, rpc_rank=0)
        distributed_init_method = get_distributed_init_method(get_ip(), get_open_port())
        local_rank = 0
        # set local rank as the device index if specified
        device_info = self.vllm_config.device_config.device.__str__().split(":")
        if len(device_info) > 1:
            local_rank = int(device_info[1])
        rank = 0
        is_driver_worker = True
        kwargs = dict(
            vllm_config=self.vllm_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            is_driver_worker=is_driver_worker,
        )
        self.mm_receiver_cache = None
        self.collective_rpc("init_worker", args=([kwargs],))
        self.collective_rpc("init_device")

    @property
    def max_concurrent_batches(self) -> int:
        # Allow two in-flight batches in tests.
        return 2

    def shutdown(self):
        # Only present when a thread pool was actually created.
        if hasattr(self, "thread_pool"):
            self.thread_pool.shutdown(wait=False)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false