repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/qwen_1m.py
examples/offline_inference/qwen_1m.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import os from urllib.request import urlopen from vllm import LLM, SamplingParams os.environ["VLLM_ALLOW_LONG_MAX_MODEL_LEN"] = "1" def load_prompt() -> str: # Test cases with various lengths can be found at: # # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/64k.txt # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/200k.txt # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/600k.txt # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/1m.txt with urlopen( "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/600k.txt", timeout=5, ) as response: prompt = response.read().decode("utf-8") return prompt # Processing the prompt. def process_requests(llm: LLM, prompts: list[str]) -> None: # Create a sampling params object. sampling_params = SamplingParams( temperature=0.7, top_p=0.8, top_k=20, repetition_penalty=1.05, detokenize=True, max_tokens=256, ) # Generate texts from the prompts. outputs = llm.generate(prompts, sampling_params) # Print the outputs. for output in outputs: prompt_token_ids = output.prompt_token_ids generated_text = output.outputs[0].text print( f"Prompt length: {len(prompt_token_ids)}, " f"Generated text: {generated_text!r}" ) # Create an LLM. def initialize_engine() -> LLM: llm = LLM( model="Qwen/Qwen2.5-7B-Instruct-1M", max_model_len=1048576, tensor_parallel_size=4, enforce_eager=True, enable_chunked_prefill=True, max_num_batched_tokens=131072, ) return llm def main(): llm = initialize_engine() prompt = load_prompt() process_requests(llm, [prompt]) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/save_sharded_state.py
examples/offline_inference/save_sharded_state.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Saves each worker's model state dict directly to a checkpoint, which enables a fast load path for large tensor-parallel models where each worker only needs to read its own shard rather than the entire checkpoint. Example usage: python save_sharded_state.py \ --model /path/to/load \ --quantization deepspeedfp \ --tensor-parallel-size 8 \ --output /path/to/save Then, the model can be loaded with llm = LLM( model="/path/to/save", load_format="sharded_state", quantization="deepspeedfp", tensor_parallel_size=8, ) """ import dataclasses import os import shutil from pathlib import Path from vllm import LLM, EngineArgs from vllm.model_executor.model_loader import ShardedStateLoader from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() EngineArgs.add_cli_args(parser) parser.add_argument( "--output", "-o", required=True, type=str, help="path to output checkpoint" ) parser.add_argument( "--file-pattern", type=str, default=ShardedStateLoader.DEFAULT_PATTERN, help="string pattern of saved filenames", ) parser.add_argument( "--max-file-size", type=int, default=5 * 1024**3, help="max size (in bytes) of each safetensors file", ) return parser.parse_args() def main(args): engine_args = EngineArgs.from_cli_args(args) if engine_args.enable_lora: raise ValueError("Saving with enable_lora=True is not supported!") model_path = engine_args.model if not Path(model_path).is_dir(): raise ValueError("model path must be a local directory") # Create LLM instance from arguments llm = LLM(**dataclasses.asdict(engine_args)) # Prepare output directory Path(args.output).mkdir(exist_ok=True) # Dump worker states to output directory llm.llm_engine.engine_core.save_sharded_state( path=args.output, pattern=args.file_pattern, max_size=args.max_file_size ) # Copy metadata files to output directory for file in os.listdir(model_path): if 
os.path.splitext(file)[1] not in (".bin", ".pt", ".safetensors"): if os.path.isdir(os.path.join(model_path, file)): shutil.copytree( os.path.join(model_path, file), os.path.join(args.output, file) ) else: shutil.copy(os.path.join(model_path, file), args.output) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/chat_with_tools.py
examples/offline_inference/chat_with_tools.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa import json import random import string from vllm import LLM from vllm.sampling_params import SamplingParams # This script is an offline demo for function calling # # If you want to run a server/client setup, please follow this code: # # - Server: # # ```bash # vllm serve mistralai/Mistral-7B-Instruct-v0.3 --tokenizer-mode mistral --load-format mistral --config-format mistral # ``` # # - Client: # # ```bash # curl --location 'http://<your-node-url>:8000/v1/chat/completions' \ # --header 'Content-Type: application/json' \ # --header 'Authorization: Bearer token' \ # --data '{ # "model": "mistralai/Mistral-7B-Instruct-v0.3" # "messages": [ # { # "role": "user", # "content": [ # {"type" : "text", "text": "Describe this image in detail please."}, # {"type": "image_url", "image_url": {"url": "https://s3.amazonaws.com/cms.ipressroom.com/338/files/201808/5b894ee1a138352221103195_A680%7Ejogging-edit/A680%7Ejogging-edit_hero.jpg"}}, # {"type" : "text", "text": "and this one as well. 
Answer in French."}, # {"type": "image_url", "image_url": {"url": "https://www.wolframcloud.com/obj/resourcesystem/images/a0e/a0ee3983-46c6-4c92-b85d-059044639928/6af8cfb971db031b.png"}} # ] # } # ] # }' # ``` # # Usage: # python demo.py simple # python demo.py advanced model_name = "mistralai/Mistral-7B-Instruct-v0.3" # or switch to "mistralai/Mistral-Nemo-Instruct-2407" # or "mistralai/Mistral-Large-Instruct-2407" # or any other mistral model with function calling ability sampling_params = SamplingParams(max_tokens=8192, temperature=0.0) llm = LLM( model=model_name, tokenizer_mode="mistral", config_format="mistral", load_format="mistral", ) def generate_random_id(length=9): characters = string.ascii_letters + string.digits random_id = "".join(random.choice(characters) for _ in range(length)) return random_id # simulate an API that can be called def get_current_weather(city: str, state: str, unit: "str"): return ( f"The weather in {city}, {state} is 85 degrees {unit}. It is " "partly cloudly, with highs in the 90's." ) tool_functions = {"get_current_weather": get_current_weather} tools = [ { "type": "function", "function": { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { "type": "object", "properties": { "city": { "type": "string", "description": "The city to find the weather for, e.g. 'San Francisco'", }, "state": { "type": "string", "description": "the two-letter abbreviation for the state that the city is" " in, e.g. 
'CA' which would mean 'California'", }, "unit": { "type": "string", "description": "The unit to fetch the temperature in", "enum": ["celsius", "fahrenheit"], }, }, "required": ["city", "state", "unit"], }, }, } ] messages = [ { "role": "user", "content": "Can you tell me what the temperate will be in Dallas, in fahrenheit?", } ] outputs = llm.chat(messages, sampling_params=sampling_params, tools=tools) output = outputs[0].outputs[0].text.strip() # append the assistant message messages.append( { "role": "assistant", "content": output, } ) # let's now actually parse and execute the model's output simulating an API call by using the # above defined function tool_calls = json.loads(output) tool_answers = [ tool_functions[call["name"]](**call["arguments"]) for call in tool_calls ] # append the answer as a tool message and let the LLM give you an answer messages.append( { "role": "tool", "content": "\n\n".join(tool_answers), "tool_call_id": generate_random_id(), } ) outputs = llm.chat(messages, sampling_params, tools=tools) print(outputs[0].outputs[0].text.strip()) # yields # 'The weather in Dallas, TX is 85 degrees Fahrenheit. ' # 'It is partly cloudly, with highs in the 90's.'
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/torchrun_example.py
examples/offline_inference/torchrun_example.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ experimental support for tensor-parallel inference with torchrun, see https://github.com/vllm-project/vllm/issues/11400 for the motivation and use case for this example. run the script with `torchrun --nproc-per-node=2 torchrun_example.py`, the argument 2 should match the `tensor_parallel_size` below. see `tests/distributed/test_torchrun_example.py` for the unit test. """ import torch.distributed as dist from vllm import LLM, SamplingParams # Create prompts, the same across all ranks prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create sampling parameters, the same across all ranks sampling_params = SamplingParams(temperature=0.8, top_p=0.95) # Use `distributed_executor_backend="external_launcher"` so that # this llm engine/instance only creates one worker. # it is important to set an explicit seed to make sure that # all ranks have the same random seed, so that sampling can be # deterministic across ranks. llm = LLM( model="meta-llama/Llama-3.1-8B", tensor_parallel_size=2, pipeline_parallel_size=2, distributed_executor_backend="external_launcher", max_model_len=32768, seed=1, ) outputs = llm.generate(prompts, sampling_params) # all ranks will have the same outputs if dist.get_rank() == 0: print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}\n") print("-" * 50) """ Further tips: 1. to communicate control messages across all ranks, use the cpu group, a PyTorch ProcessGroup with GLOO backend. ```python from vllm.distributed.parallel_state import get_world_group cpu_group = get_world_group().cpu_group torch_rank = dist.get_rank(group=cpu_group) if torch_rank == 0: # do something for rank 0, e.g. saving the results to disk. ``` 2. 
to communicate data across all ranks, use the model's device group, a PyTorch ProcessGroup with NCCL backend. ```python from vllm.distributed.parallel_state import get_world_group device_group = get_world_group().device_group ``` 3. to access the model directly in every rank, use the following code: ```python llm.llm_engine.model_executor.driver_worker.worker.model_runner.model ``` """
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/spec_decode.py
examples/offline_inference/spec_decode.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from transformers import AutoTokenizer from vllm import LLM, SamplingParams from vllm.benchmarks.datasets import add_dataset_parser, get_samples from vllm.inputs import TokensPrompt from vllm.v1.metrics.reader import Counter, Vector try: from vllm.utils.argparse_utils import FlexibleArgumentParser except ImportError: from argparse import ArgumentParser as FlexibleArgumentParser QUESTION = "What is the content of each image?" IMAGE_URLS = [ "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/duck.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/lion.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/flycatcher.jpeg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/somefish.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/starfish.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/snail.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/thistle.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/husky.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/orangetabbycat.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/guineapig.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/rabbit.jpg", "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/horsepony.jpg", ] def get_custom_mm_prompts(num_prompts): prompts = [] for url in IMAGE_URLS: prompts.append( [ {"type": "image_url", "image_url": {"url": url}}, {"type": "text", "text": QUESTION}, ] ) if num_prompts > len(IMAGE_URLS): prompts = prompts * (num_prompts // len(IMAGE_URLS) + 1) return [[{"role": "user", "content": prompt}] for prompt in prompts[:num_prompts]] def parse_args(): parser = 
FlexibleArgumentParser() add_dataset_parser(parser) parser.add_argument("--test", action="store_true") parser.add_argument( "--method", type=str, default="eagle", choices=["ngram", "eagle", "eagle3", "mtp"], ) parser.add_argument("--num-spec-tokens", type=int, default=2) parser.add_argument("--prompt-lookup-max", type=int, default=5) parser.add_argument("--prompt-lookup-min", type=int, default=2) parser.add_argument("--tp", type=int, default=1) parser.add_argument("--enforce-eager", action="store_true") parser.add_argument("--enable-chunked-prefill", action="store_true") parser.add_argument("--max-model-len", type=int, default=16384) parser.add_argument("--temp", type=float, default=0) parser.add_argument("--top-p", type=float, default=1.0) parser.add_argument("--top-k", type=int, default=-1) parser.add_argument("--print-output", action="store_true") parser.add_argument("--output-len", type=int, default=256) parser.add_argument("--model-dir", type=str, default=None) parser.add_argument("--eagle-dir", type=str, default=None) parser.add_argument("--custom-mm-prompts", action="store_true") return parser.parse_args() def main(args): args.endpoint_type = "openai-chat" model_dir = args.model_dir if args.model_dir is None: if args.custom_mm_prompts: raise ValueError( "custom_mm_prompts requires mm based models" "default llama3.1-8b-instruct is not mm based" "please specify model_dir to give a mm based model" ) model_dir = "meta-llama/Llama-3.1-8B-Instruct" tokenizer = AutoTokenizer.from_pretrained(model_dir) args.custom_skip_chat_template = True if not args.custom_mm_prompts: prompts = get_samples(args, tokenizer) # add_special_tokens is False to avoid adding bos twice # when using chat templates prompt_ids = [ tokenizer.encode(prompt.prompt, add_special_tokens=False) for prompt in prompts ] else: prompts = get_custom_mm_prompts(args.num_prompts) if args.method == "eagle" or args.method == "eagle3": eagle_dir = args.eagle_dir if args.method == "eagle" and eagle_dir is 
None: eagle_dir = "yuhuili/EAGLE-LLaMA3.1-Instruct-8B" elif args.method == "eagle3" and eagle_dir is None: eagle_dir = "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B" speculative_config = { "method": args.method, "model": eagle_dir, "num_speculative_tokens": args.num_spec_tokens, } elif args.method == "ngram": speculative_config = { "method": "ngram", "num_speculative_tokens": args.num_spec_tokens, "prompt_lookup_max": args.prompt_lookup_max, "prompt_lookup_min": args.prompt_lookup_min, } elif args.method == "mtp": speculative_config = { "method": "mtp", "num_speculative_tokens": args.num_spec_tokens, } else: raise ValueError(f"unknown method: {args.method}") llm = LLM( model=model_dir, trust_remote_code=True, tensor_parallel_size=args.tp, enable_chunked_prefill=args.enable_chunked_prefill, enforce_eager=args.enforce_eager, gpu_memory_utilization=0.9, speculative_config=speculative_config, disable_log_stats=False, max_model_len=args.max_model_len, limit_mm_per_prompt={"image": 5}, disable_chunked_mm_input=True, ) sampling_params = SamplingParams(temperature=args.temp, max_tokens=args.output_len) if not args.custom_mm_prompts: outputs = llm.generate( [TokensPrompt(prompt_token_ids=x) for x in prompt_ids], sampling_params=sampling_params, ) else: outputs = llm.chat(prompts, sampling_params=sampling_params) # print the generated text if args.print_output: for output in outputs: print("-" * 50) print(f"prompt: {output.prompt}") print(f"generated text: {output.outputs[0].text}") print("-" * 50) metrics = llm.get_metrics() total_num_output_tokens = sum( len(output.outputs[0].token_ids) for output in outputs ) num_drafts = 0 num_draft_tokens = 0 num_accepted_tokens = 0 acceptance_counts = [0] * args.num_spec_tokens for metric in metrics: if metric.name == "vllm:spec_decode_num_drafts": assert isinstance(metric, Counter) num_drafts += metric.value elif metric.name == "vllm:spec_decode_num_draft_tokens": assert isinstance(metric, Counter) num_draft_tokens += metric.value elif 
metric.name == "vllm:spec_decode_num_accepted_tokens": assert isinstance(metric, Counter) num_accepted_tokens += metric.value elif metric.name == "vllm:spec_decode_num_accepted_tokens_per_pos": assert isinstance(metric, Vector) for pos in range(len(metric.values)): acceptance_counts[pos] += metric.values[pos] print("-" * 50) print(f"total_num_output_tokens: {total_num_output_tokens}") print(f"num_drafts: {num_drafts}") print(f"num_draft_tokens: {num_draft_tokens}") print(f"num_accepted_tokens: {num_accepted_tokens}") acceptance_length = 1 + (num_accepted_tokens / num_drafts) if num_drafts > 0 else 1 print(f"mean acceptance length: {acceptance_length:.2f}") print("-" * 50) # print acceptance at each token position for i in range(len(acceptance_counts)): acceptance_rate = acceptance_counts[i] / num_drafts if num_drafts > 0 else 0 print(f"acceptance at token {i}: {acceptance_rate:.2f}") return acceptance_length if __name__ == "__main__": args = parse_args() acceptance_length = main(args) if args.test: # takes ~30s to run on 1xH100 assert args.method in ["eagle", "eagle3"] assert args.tp == 1 assert args.num_spec_tokens == 3 assert args.dataset_name == "hf" assert args.dataset_path == "philschmid/mt-bench" assert args.num_prompts == 80 assert args.temp == 0 assert args.top_p == 1.0 assert args.top_k == -1 assert args.enable_chunked_prefill # check acceptance length is within 2% of expected value rtol = 0.02 expected_acceptance_length = 2.296 if args.method == "eagle" else 2.811 assert ( acceptance_length <= (1 + rtol) * expected_acceptance_length and acceptance_length >= (1 - rtol) * expected_acceptance_length ), ( f"acceptance_length {acceptance_length} is not " f"within {rtol * 100}% of {expected_acceptance_length}" ) print( f"Test passed! Expected AL: " f"{expected_acceptance_length}, got {acceptance_length}" )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/encoder_decoder_multimodal.py
examples/offline_inference/encoder_decoder_multimodal.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use vLLM for running offline inference with the explicit/implicit prompt format on enc-dec LMMs for text generation. """ import os import time from collections.abc import Sequence from dataclasses import asdict from typing import NamedTuple from vllm import LLM, EngineArgs, PromptType, SamplingParams from vllm.assets.audio import AudioAsset from vllm.utils.argparse_utils import FlexibleArgumentParser class ModelRequestData(NamedTuple): engine_args: EngineArgs prompts: Sequence[PromptType] def run_whisper(): os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" engine_args = EngineArgs( model="openai/whisper-large-v3-turbo", max_model_len=448, max_num_seqs=16, limit_mm_per_prompt={"audio": 1}, dtype="half", ) prompts = [ { # Test implicit prompt "prompt": "<|startoftranscript|>", "multi_modal_data": { "audio": AudioAsset("mary_had_lamb").audio_and_sample_rate, }, }, { # Test explicit encoder/decoder prompt "encoder_prompt": { "prompt": "", "multi_modal_data": { "audio": AudioAsset("winning_call").audio_and_sample_rate, }, }, "decoder_prompt": "<|startoftranscript|>", }, ] return ModelRequestData( engine_args=engine_args, prompts=prompts, ) model_example_map = { "whisper": run_whisper, } def parse_args(): parser = FlexibleArgumentParser( description="Demo on using vLLM for offline inference with " "vision language models for text generation" ) parser.add_argument( "--model-type", "-m", type=str, default="whisper", choices=model_example_map.keys(), help='Huggingface "model_type".', ) parser.add_argument( "--seed", type=int, default=0, help="Set the seed when initializing `vllm.LLM`.", ) return parser.parse_args() def main(args): model = args.model_type if model not in model_example_map: raise ValueError(f"Model type {model} is not supported.") req_data = model_example_map[model]() # Disable other modalities to save memory 
default_limits = {"image": 0, "video": 0, "audio": 0} req_data.engine_args.limit_mm_per_prompt = default_limits | dict( req_data.engine_args.limit_mm_per_prompt or {} ) engine_args = asdict(req_data.engine_args) | {"seed": args.seed} llm = LLM(**engine_args) prompts = req_data.prompts # Create a sampling params object. sampling_params = SamplingParams( temperature=0, top_p=1.0, max_tokens=64, skip_special_tokens=False, ) start = time.time() # Generate output tokens from the prompts. The output is a list of # RequestOutput objects that contain the prompt, generated # text, and other information. outputs = llm.generate(prompts, sampling_params) # Print the outputs. for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Decoder prompt: {prompt!r}, Generated text: {generated_text!r}") duration = time.time() - start print("Duration:", duration) print("RPS:", len(prompts) / duration) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/automatic_prefix_caching.py
examples/offline_inference/automatic_prefix_caching.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Demonstration script for Automatic Prefix Caching (APC) in vLLM. Automatic Prefix Caching (APC) allows the vLLM engine to reuse cached KV (key-value) pairs from previous prompts if a new query shares the same prefix. This reduces redundant computation and improves inference speed. To enable APC, set `enable_prefix_caching=True` when initializing the vLLM engine. This script uses a long Markdown table as the shared prompt prefix and compares the generation time for two queries that share the same prefix but ask different questions. Run: python examples/offline_inference/automatic_prefix_caching.py """ import time from vllm import LLM, SamplingParams # ruff: noqa: E501 # A prompt containing a large markdown table. The table is randomly generated by GPT-4. LONG_PROMPT = ( "You are a helpful assistant in recognizes the content of tables in markdown format. Here is a table as follows.\n# Table\n" + """ | ID | Name | Age | Occupation | Country | Email | Phone Number | Address | |-----|---------------|-----|---------------|---------------|------------------------|----------------|------------------------------| | 1 | John Doe | 29 | Engineer | USA | john.doe@example.com | 555-1234 | 123 Elm St, Springfield, IL | | 2 | Jane Smith | 34 | Doctor | Canada | jane.smith@example.com | 555-5678 | 456 Oak St, Toronto, ON | | 3 | Alice Johnson | 27 | Teacher | UK | alice.j@example.com | 555-8765 | 789 Pine St, London, UK | | 4 | Bob Brown | 45 | Artist | Australia | bob.b@example.com | 555-4321 | 321 Maple St, Sydney, NSW | | 5 | Carol White | 31 | Scientist | New Zealand | carol.w@example.com | 555-6789 | 654 Birch St, Wellington, NZ | | 6 | Dave Green | 28 | Lawyer | Ireland | dave.g@example.com | 555-3456 | 987 Cedar St, Dublin, IE | | 7 | Emma Black | 40 | Musician | USA | emma.b@example.com | 555-1111 | 246 Ash St, New York, NY | | 8 | Frank Blue | 37 | Chef | Canada 
| frank.b@example.com | 555-2222 | 135 Spruce St, Vancouver, BC | | 9 | Grace Yellow | 50 | Engineer | UK | grace.y@example.com | 555-3333 | 864 Fir St, Manchester, UK | | 10 | Henry Violet | 32 | Artist | Australia | henry.v@example.com | 555-4444 | 753 Willow St, Melbourne, VIC| | 11 | Irene Orange | 26 | Scientist | New Zealand | irene.o@example.com | 555-5555 | 912 Poplar St, Auckland, NZ | | 12 | Jack Indigo | 38 | Teacher | Ireland | jack.i@example.com | 555-6666 | 159 Elm St, Cork, IE | | 13 | Karen Red | 41 | Lawyer | USA | karen.r@example.com | 555-7777 | 357 Cedar St, Boston, MA | | 14 | Leo Brown | 30 | Chef | Canada | leo.b@example.com | 555-8888 | 246 Oak St, Calgary, AB | | 15 | Mia Green | 33 | Musician | UK | mia.g@example.com | 555-9999 | 975 Pine St, Edinburgh, UK | | 16 | Noah Yellow | 29 | Doctor | Australia | noah.y@example.com | 555-0000 | 864 Birch St, Brisbane, QLD | | 17 | Olivia Blue | 35 | Engineer | New Zealand | olivia.b@example.com | 555-1212 | 753 Maple St, Hamilton, NZ | | 18 | Peter Black | 42 | Artist | Ireland | peter.b@example.com | 555-3434 | 912 Fir St, Limerick, IE | | 19 | Quinn White | 28 | Scientist | USA | quinn.w@example.com | 555-5656 | 159 Willow St, Seattle, WA | | 20 | Rachel Red | 31 | Teacher | Canada | rachel.r@example.com | 555-7878 | 357 Poplar St, Ottawa, ON | | 21 | Steve Green | 44 | Lawyer | UK | steve.g@example.com | 555-9090 | 753 Elm St, Birmingham, UK | | 22 | Tina Blue | 36 | Musician | Australia | tina.b@example.com | 555-1213 | 864 Cedar St, Perth, WA | | 23 | Umar Black | 39 | Chef | New Zealand | umar.b@example.com | 555-3435 | 975 Spruce St, Christchurch, NZ| | 24 | Victor Yellow | 43 | Engineer | Ireland | victor.y@example.com | 555-5657 | 246 Willow St, Galway, IE | | 25 | Wendy Orange | 27 | Artist | USA | wendy.o@example.com | 555-7879 | 135 Elm St, Denver, CO | | 26 | Xavier Green | 34 | Scientist | Canada | xavier.g@example.com | 555-9091 | 357 Oak St, Montreal, QC | | 27 | Yara Red | 41 | 
Teacher | UK | yara.r@example.com | 555-1214 | 975 Pine St, Leeds, UK | | 28 | Zack Blue | 30 | Lawyer | Australia | zack.b@example.com | 555-3436 | 135 Birch St, Adelaide, SA | | 29 | Amy White | 33 | Musician | New Zealand | amy.w@example.com | 555-5658 | 159 Maple St, Wellington, NZ | | 30 | Ben Black | 38 | Chef | Ireland | ben.b@example.com | 555-7870 | 246 Fir St, Waterford, IE | """ ) def get_generation_time(llm, sampling_params, prompts): # time the generation start_time = time.time() output = llm.generate(prompts, sampling_params=sampling_params) end_time = time.time() # print the output and generation time print("-" * 30) print(f"Output: {output[0].outputs[0].text}") print(f"Generation time: {end_time - start_time} seconds.") print("-" * 30) def main(): # set enable_prefix_caching=True to enable APC llm = LLM(model="lmsys/longchat-13b-16k", enable_prefix_caching=True) sampling_params = SamplingParams(temperature=0, max_tokens=100) # Querying the age of John Doe get_generation_time( llm, sampling_params, LONG_PROMPT + "Question: what is the age of John Doe? Your answer: The age of John Doe is ", ) # Querying the age of Zack Blue # This query will be faster since vllm avoids computing the KV cache of LONG_PROMPT again. get_generation_time( llm, sampling_params, LONG_PROMPT + "Question: what is the age of Zack Blue? Your answer: The age of Zack Blue is ", ) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/simple_profiling.py
examples/offline_inference/simple_profiling.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import time from vllm import LLM, SamplingParams # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create a sampling params object. sampling_params = SamplingParams(temperature=0.8, top_p=0.95) def main(): # Create an LLM. llm = LLM( model="facebook/opt-125m", tensor_parallel_size=1, profiler_config={ "profiler": "torch", "torch_profiler_dir": "./vllm_profile", }, ) llm.start_profile() # Generate texts from the prompts. The output is a list of RequestOutput # objects that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) llm.stop_profile() # Print the outputs. print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) # Add a buffer to wait for profiler in the background process # (in case MP is on) to finish writing profiling output. time.sleep(10) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/audio_language.py
examples/offline_inference/audio_language.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use vLLM for running offline inference with the correct prompt format on audio language models. For most models, the prompt format should follow corresponding examples on HuggingFace model repository. """ import os from dataclasses import asdict from typing import Any, NamedTuple from huggingface_hub import snapshot_download from transformers import AutoTokenizer from vllm import LLM, EngineArgs, SamplingParams from vllm.assets.audio import AudioAsset from vllm.lora.request import LoRARequest from vllm.utils.argparse_utils import FlexibleArgumentParser audio_assets = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")] question_per_audio_count = { 0: "What is 1+1?", 1: "What is recited in the audio?", 2: "What sport and what nursery rhyme are referenced?", } class ModelRequestData(NamedTuple): engine_args: EngineArgs prompt: str | None = None prompt_token_ids: dict[str, list[int]] | None = None multi_modal_data: dict[str, Any] | None = None stop_token_ids: list[int] | None = None lora_requests: list[LoRARequest] | None = None # NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on # lower-end GPUs. # Unless specified, these settings have been tested to work on a single L4. 
# AudioFlamingo3 def run_audioflamingo3(question: str, audio_count: int) -> ModelRequestData: model_name = "nvidia/audio-flamingo-3-hf" engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=2, limit_mm_per_prompt={"audio": audio_count}, enforce_eager=True, ) # AudioFlamingo3 uses <sound> token for audio audio_placeholder = "<sound>" * audio_count prompt = ( "<|im_start|>system\n" "You are a helpful assistant.<|im_end|>\n" "<|im_start|>user\n" f"{audio_placeholder}{question}<|im_end|>\n" "<|im_start|>assistant\n" ) return ModelRequestData( engine_args=engine_args, prompt=prompt, ) # Gemma3N def run_gemma3n(question: str, audio_count: int) -> ModelRequestData: model_name = "google/gemma-3n-E2B-it" engine_args = EngineArgs( model=model_name, max_model_len=2048, max_num_batched_tokens=2048, max_num_seqs=2, limit_mm_per_prompt={"audio": audio_count}, enforce_eager=True, ) prompt = f"<start_of_turn>user\n<audio_soft_token>{question}" "<end_of_turn>\n<start_of_turn>model\n" return ModelRequestData( engine_args=engine_args, prompt=prompt, ) # Granite Speech def run_granite_speech(question: str, audio_count: int) -> ModelRequestData: # NOTE - the setting in this example are somewhat different from what is # optimal for granite speech, and it is generally recommended to use beam # search. Check the model README for suggested settings. # https://huggingface.co/ibm-granite/granite-speech-3.3-8b model_name = "ibm-granite/granite-speech-3.3-8b" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=2048, max_num_seqs=2, enable_lora=True, max_lora_rank=64, limit_mm_per_prompt={"audio": audio_count}, ) # The model has an audio-specific lora directly in its model dir; # it should be enabled whenever you pass audio inputs to the model. 
speech_lora_path = model_name audio_placeholder = "<|audio|>" * audio_count prompts = f"<|start_of_role|>system<|end_of_role|>Knowledge Cutoff Date: April 2024.\nToday's Date: December 19, 2024.\nYou are Granite, developed by IBM. You are a helpful AI assistant<|end_of_text|>\n<|start_of_role|>user<|end_of_role|>{audio_placeholder}{question}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>" # noqa: E501 return ModelRequestData( engine_args=engine_args, prompt=prompts, lora_requests=[LoRARequest("speech", 1, speech_lora_path)], ) # MiDashengLM def run_midashenglm(question: str, audio_count: int): model_name = "mispeech/midashenglm-7b" engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=4096, max_num_seqs=5, limit_mm_per_prompt={"audio": audio_count}, ) audio_in_prompt = "".join( ["<|audio_bos|><|AUDIO|><|audio_eos|>" for idx in range(audio_count)] ) default_system = "You are a helpful language and speech assistant." prompt = ( f"<|im_start|>system\n{default_system}<|im_end|>\n" "<|im_start|>user\n" f"{audio_in_prompt}{question}<|im_end|>\n" "<|im_start|>assistant\n" ) return ModelRequestData( engine_args=engine_args, prompt=prompt, ) # MiniCPM-O def run_minicpmo(question: str, audio_count: int) -> ModelRequestData: model_name = "openbmb/MiniCPM-o-2_6" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=4096, max_num_seqs=2, limit_mm_per_prompt={"audio": audio_count}, ) stop_tokens = ["<|im_end|>", "<|endoftext|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] audio_placeholder = "(<audio>./</audio>)" * audio_count audio_chat_template = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n<|spk_bos|><|spk|><|spk_eos|><|tts_bos|>' }}{% endif %}" # noqa: E501 
messages = [{"role": "user", "content": f"{audio_placeholder}\n{question}"}] prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True, chat_template=audio_chat_template, ) return ModelRequestData( engine_args=engine_args, prompt=prompt, stop_token_ids=stop_token_ids, ) # Phi-4-multimodal-instruct def run_phi4mm(question: str, audio_count: int) -> ModelRequestData: """ Phi-4-multimodal-instruct supports both image and audio inputs. Here, we show how to process audio inputs. """ model_path = snapshot_download("microsoft/Phi-4-multimodal-instruct") # Since the vision-lora and speech-lora co-exist with the base model, # we have to manually specify the path of the lora weights. speech_lora_path = os.path.join(model_path, "speech-lora") placeholders = "".join([f"<|audio_{i + 1}|>" for i in range(audio_count)]) prompts = f"<|user|>{placeholders}{question}<|end|><|assistant|>" engine_args = EngineArgs( model=model_path, trust_remote_code=True, max_model_len=12800, max_num_seqs=2, enable_lora=True, max_lora_rank=320, limit_mm_per_prompt={"audio": audio_count}, ) return ModelRequestData( engine_args=engine_args, prompt=prompts, lora_requests=[LoRARequest("speech", 1, speech_lora_path)], ) # Qwen2-Audio def run_qwen2_audio(question: str, audio_count: int) -> ModelRequestData: model_name = "Qwen/Qwen2-Audio-7B-Instruct" engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=5, limit_mm_per_prompt={"audio": audio_count}, ) audio_in_prompt = "".join( [ f"Audio {idx + 1}: <|audio_bos|><|AUDIO|><|audio_eos|>\n" for idx in range(audio_count) ] ) prompt = ( "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" "<|im_start|>user\n" f"{audio_in_prompt}{question}<|im_end|>\n" "<|im_start|>assistant\n" ) return ModelRequestData( engine_args=engine_args, prompt=prompt, ) # Qwen2.5-Omni def run_qwen2_5_omni(question: str, audio_count: int): model_name = "Qwen/Qwen2.5-Omni-7B" engine_args = EngineArgs( model=model_name, 
max_model_len=4096, max_num_seqs=5, limit_mm_per_prompt={"audio": audio_count}, ) audio_in_prompt = "".join( ["<|audio_bos|><|AUDIO|><|audio_eos|>\n" for idx in range(audio_count)] ) default_system = ( "You are Qwen, a virtual human developed by the Qwen Team, Alibaba " "Group, capable of perceiving auditory and visual inputs, as well as " "generating text and speech." ) prompt = ( f"<|im_start|>system\n{default_system}<|im_end|>\n" "<|im_start|>user\n" f"{audio_in_prompt}{question}<|im_end|>\n" "<|im_start|>assistant\n" ) return ModelRequestData( engine_args=engine_args, prompt=prompt, ) # Ultravox 0.5-1B def run_ultravox(question: str, audio_count: int) -> ModelRequestData: model_name = "fixie-ai/ultravox-v0_5-llama-3_2-1b" tokenizer = AutoTokenizer.from_pretrained(model_name) messages = [{"role": "user", "content": "<|audio|>\n" * audio_count + question}] prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) engine_args = EngineArgs( model=model_name, max_model_len=4096, max_num_seqs=5, trust_remote_code=True, limit_mm_per_prompt={"audio": audio_count}, ) return ModelRequestData( engine_args=engine_args, prompt=prompt, ) # Voxtral # Make sure to install mistral-common[audio]. 
def run_voxtral(question: str, audio_count: int) -> ModelRequestData: from mistral_common.audio import Audio from mistral_common.protocol.instruct.chunk import ( AudioChunk, RawAudio, TextChunk, ) from mistral_common.protocol.instruct.messages import ( UserMessage, ) from mistral_common.protocol.instruct.request import ChatCompletionRequest from mistral_common.tokens.tokenizers.mistral import MistralTokenizer model_name = "mistralai/Voxtral-Mini-3B-2507" tokenizer = MistralTokenizer.from_hf_hub(model_name) engine_args = EngineArgs( model=model_name, max_model_len=8192, max_num_seqs=2, limit_mm_per_prompt={"audio": audio_count}, config_format="mistral", load_format="mistral", tokenizer_mode="mistral", enforce_eager=True, enable_chunked_prefill=False, ) text_chunk = TextChunk(text=question) audios = [ Audio.from_file(str(audio_assets[i].get_local_path()), strict=False) for i in range(audio_count) ] audio_chunks = [ AudioChunk(input_audio=RawAudio.from_audio(audio)) for audio in audios ] messages = [UserMessage(content=[*audio_chunks, text_chunk])] req = ChatCompletionRequest(messages=messages, model=model_name) tokens = tokenizer.encode_chat_completion(req) prompt_ids, audios = tokens.tokens, tokens.audios audios_and_sr = [(au.audio_array, au.sampling_rate) for au in audios] multi_modal_data = {"audio": audios_and_sr} return ModelRequestData( engine_args=engine_args, prompt_token_ids=prompt_ids, multi_modal_data=multi_modal_data, ) # GLM-ASR def run_glmasr(question: str, audio_count: int) -> ModelRequestData: model_name = "zai-org/GLM-ASR-Nano-2512" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) # GLM-ASR uses <|pad|> token for audio audio_placeholder = "<|pad|>" * audio_count messages = [{"role": "user", "content": f"{audio_placeholder}{question}"}] prompt = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) engine_args = EngineArgs( model=model_name, trust_remote_code=True, max_model_len=4096, 
max_num_seqs=2, limit_mm_per_prompt={"audio": audio_count}, ) return ModelRequestData( engine_args=engine_args, prompt=prompt, ) # Whisper def run_whisper(question: str, audio_count: int) -> ModelRequestData: assert audio_count == 1, "Whisper only support single audio input per prompt" model_name = "openai/whisper-large-v3-turbo" prompt = "<|startoftranscript|>" engine_args = EngineArgs( model=model_name, max_model_len=448, max_num_seqs=5, limit_mm_per_prompt={"audio": audio_count}, ) return ModelRequestData( engine_args=engine_args, prompt=prompt, ) model_example_map = { "audioflamingo3": run_audioflamingo3, "gemma3n": run_gemma3n, "glmasr": run_glmasr, "granite_speech": run_granite_speech, "midashenglm": run_midashenglm, "minicpmo": run_minicpmo, "phi4_mm": run_phi4mm, "qwen2_audio": run_qwen2_audio, "qwen2_5_omni": run_qwen2_5_omni, "ultravox": run_ultravox, "voxtral": run_voxtral, "whisper": run_whisper, } def parse_args(): parser = FlexibleArgumentParser( description="Demo on using vLLM for offline inference with " "audio language models" ) parser.add_argument( "--model-type", "-m", type=str, default="ultravox", choices=model_example_map.keys(), help='Huggingface "model_type".', ) parser.add_argument( "--num-prompts", type=int, default=1, help="Number of prompts to run." ) parser.add_argument( "--num-audios", type=int, default=1, choices=[0, 1, 2], help="Number of audio items per prompt.", ) parser.add_argument( "--seed", type=int, default=0, help="Set the seed when initializing `vllm.LLM`.", ) parser.add_argument( "--tensor-parallel-size", "-tp", type=int, default=None, help="Tensor parallel size to override the model's default setting. 
", ) return parser.parse_args() def main(args): model = args.model_type if model not in model_example_map: raise ValueError(f"Model type {model} is not supported.") if args.tensor_parallel_size is not None and args.tensor_parallel_size < 1: raise ValueError( f"tensor_parallel_size must be a positive integer, " f"got {args.tensor_parallel_size}" ) audio_count = args.num_audios req_data = model_example_map[model]( question_per_audio_count[audio_count], audio_count ) # Disable other modalities to save memory default_limits = {"image": 0, "video": 0, "audio": 0} req_data.engine_args.limit_mm_per_prompt = default_limits | dict( req_data.engine_args.limit_mm_per_prompt or {} ) engine_args = asdict(req_data.engine_args) | {"seed": args.seed} if args.tensor_parallel_size is not None: engine_args["tensor_parallel_size"] = args.tensor_parallel_size llm = LLM(**engine_args) # We set temperature to 0.2 so that outputs can be different # even when all prompts are identical when running batch inference. sampling_params = SamplingParams( temperature=0.2, max_tokens=64, stop_token_ids=req_data.stop_token_ids ) def get_input(start, end): mm_data = req_data.multi_modal_data if not mm_data: mm_data = {} if end - start > 0: mm_data = { "audio": [ asset.audio_and_sample_rate for asset in audio_assets[start:end] ] } inputs = {"multi_modal_data": mm_data} if req_data.prompt: inputs["prompt"] = req_data.prompt else: inputs["prompt_token_ids"] = req_data.prompt_token_ids return inputs # Batch inference assert args.num_prompts > 0 if audio_count != 1: inputs = get_input(0, audio_count) inputs = [inputs] * args.num_prompts else: # For single audio input, we need to vary the audio input # to avoid deduplication in vLLM engine. 
inputs = [] for i in range(args.num_prompts): start = i % len(audio_assets) inp = get_input(start, start + 1) inputs.append(inp) # Add LoRA request if applicable lora_request = ( req_data.lora_requests * args.num_prompts if req_data.lora_requests else None ) outputs = llm.generate( inputs, sampling_params=sampling_params, lora_request=lora_request, ) for o in outputs: generated_text = o.outputs[0].text print(generated_text) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/structured_outputs.py
examples/offline_inference/structured_outputs.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file demonstrates the example usage of structured outputs in vLLM. It shows how to apply different constraints such as choice, regex, json schema, and grammar to produce structured and formatted results based on specific prompts. """ from enum import Enum from pydantic import BaseModel from vllm import LLM, SamplingParams from vllm.sampling_params import StructuredOutputsParams MAX_TOKENS = 50 # Structured outputs by Choice (list of possible options) structured_outputs_params_choice = StructuredOutputsParams( choice=["Positive", "Negative"] ) sampling_params_choice = SamplingParams( structured_outputs=structured_outputs_params_choice ) prompt_choice = "Classify this sentiment: vLLM is wonderful!" # Structured outputs by Regex structured_outputs_params_regex = StructuredOutputsParams(regex=r"\w+@\w+\.com\n") sampling_params_regex = SamplingParams( structured_outputs=structured_outputs_params_regex, stop=["\n"], max_tokens=MAX_TOKENS, ) prompt_regex = ( "Generate an email address for Alan Turing, who works in Enigma." "End in .com and new line. 
Example result:" "alan.turing@enigma.com\n" ) # Structured outputs by JSON using Pydantic schema class CarType(str, Enum): sedan = "sedan" suv = "SUV" truck = "Truck" coupe = "Coupe" class CarDescription(BaseModel): brand: str model: str car_type: CarType json_schema = CarDescription.model_json_schema() structured_outputs_params_json = StructuredOutputsParams(json=json_schema) sampling_params_json = SamplingParams( structured_outputs=structured_outputs_params_json, max_tokens=MAX_TOKENS ) prompt_json = ( "Generate a JSON with the brand, model and car_type of " "the most iconic car from the 90's" ) # Structured outputs by Grammar simplified_sql_grammar = """ root ::= select_statement select_statement ::= "SELECT " column " from " table " where " condition column ::= "col_1 " | "col_2 " table ::= "table_1 " | "table_2 " condition ::= column "= " number number ::= "1 " | "2 " """ structured_outputs_params_grammar = StructuredOutputsParams( grammar=simplified_sql_grammar ) sampling_params_grammar = SamplingParams( structured_outputs=structured_outputs_params_grammar, max_tokens=MAX_TOKENS, ) prompt_grammar = ( "Generate an SQL query to show the 'username' and 'email' from the 'users' table." 
) def format_output(title: str, output: str): print(f"{'-' * 50}\n{title}: {output}\n{'-' * 50}") def generate_output(prompt: str, sampling_params: SamplingParams, llm: LLM): outputs = llm.generate(prompt, sampling_params=sampling_params) return outputs[0].outputs[0].text def main(): llm = LLM(model="Qwen/Qwen2.5-3B-Instruct", max_model_len=100) choice_output = generate_output(prompt_choice, sampling_params_choice, llm) format_output("Structured outputs by Choice", choice_output) regex_output = generate_output(prompt_regex, sampling_params_regex, llm) format_output("Structured outputs by Regex", regex_output) json_output = generate_output(prompt_json, sampling_params_json, llm) format_output("Structured outputs by JSON", json_output) grammar_output = generate_output(prompt_grammar, sampling_params_grammar, llm) format_output("Structured outputs by Grammar", grammar_output) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/context_extension.py
examples/offline_inference/context_extension.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This script demonstrates how to extend the context length of a Qwen model using the YARN method (rope_parameters) and run a simple chat example. Usage: python examples/offline_inference/context_extension.py """ from vllm import LLM, SamplingParams def create_llm(): rope_theta = 1000000 original_max_position_embeddings = 32768 factor = 4.0 # Use yarn to extend context hf_overrides = { "rope_parameters": { "rope_theta": rope_theta, "rope_type": "yarn", "factor": factor, "original_max_position_embeddings": original_max_position_embeddings, }, "max_model_len": int(original_max_position_embeddings * factor), } llm = LLM(model="Qwen/Qwen3-0.6B", hf_overrides=hf_overrides) return llm def run_llm_chat(llm): sampling_params = SamplingParams( temperature=0.8, top_p=0.95, max_tokens=128, ) conversation = [ {"role": "system", "content": "You are a helpful assistant"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hello! How can I assist you today?"}, ] outputs = llm.chat(conversation, sampling_params, use_tqdm=False) return outputs def print_outputs(outputs): print("\nGenerated Outputs:\n" + "-" * 80) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\n") print(f"Generated text: {generated_text!r}") print("-" * 80) def main(): llm = create_llm() outputs = run_llm_chat(llm) print_outputs(outputs) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/prompt_embed_inference.py
examples/offline_inference/prompt_embed_inference.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Demonstrates how to generate prompt embeddings using Hugging Face Transformers and use them as input to vLLM for both single and batch inference. Model: meta-llama/Llama-3.2-1B-Instruct Note: This model is gated on Hugging Face Hub. You must request access to use it: https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct Requirements: - vLLM - transformers Run: python examples/offline_inference/prompt_embed_inference.py """ import torch from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizer from vllm import LLM def init_tokenizer_and_llm(model_name: str): tokenizer = AutoTokenizer.from_pretrained(model_name) transformers_model = AutoModelForCausalLM.from_pretrained(model_name) embedding_layer = transformers_model.get_input_embeddings() llm = LLM(model=model_name, enable_prompt_embeds=True) return tokenizer, embedding_layer, llm def get_prompt_embeds( chat: list[dict[str, str]], tokenizer: PreTrainedTokenizer, embedding_layer: torch.nn.Module, ): token_ids = tokenizer.apply_chat_template( chat, add_generation_prompt=True, return_tensors="pt" ) prompt_embeds = embedding_layer(token_ids).squeeze(0) return prompt_embeds def single_prompt_inference( llm: LLM, tokenizer: PreTrainedTokenizer, embedding_layer: torch.nn.Module ): chat = [{"role": "user", "content": "Please tell me about the capital of France."}] prompt_embeds = get_prompt_embeds(chat, tokenizer, embedding_layer) outputs = llm.generate( { "prompt_embeds": prompt_embeds, } ) print("\n[Single Inference Output]") print("-" * 30) for o in outputs: print(o.outputs[0].text) print("-" * 30) def batch_prompt_inference( llm: LLM, tokenizer: PreTrainedTokenizer, embedding_layer: torch.nn.Module ): chats = [ [{"role": "user", "content": "Please tell me about the capital of France."}], [{"role": "user", "content": "When is the day longest during the year?"}], [{"role": "user", 
"content": "Where is bigger, the moon or the sun?"}], ] prompt_embeds_list = [ get_prompt_embeds(chat, tokenizer, embedding_layer) for chat in chats ] outputs = llm.generate([{"prompt_embeds": embeds} for embeds in prompt_embeds_list]) print("\n[Batch Inference Outputs]") print("-" * 30) for i, o in enumerate(outputs): print(f"Q{i + 1}: {chats[i][0]['content']}") print(f"A{i + 1}: {o.outputs[0].text}\n") print("-" * 30) def main(): model_name = "meta-llama/Llama-3.2-1B-Instruct" tokenizer, embedding_layer, llm = init_tokenizer_and_llm(model_name) single_prompt_inference(llm, tokenizer, embedding_layer) batch_prompt_inference(llm, tokenizer, embedding_layer) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/reproducibility.py
examples/offline_inference/reproducibility.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Demonstrates how to achieve reproducibility in vLLM. Main article: https://docs.vllm.ai/en/latest/usage/reproducibility.html """ import os import random from vllm import LLM, SamplingParams # Either: ## Turn off multiprocessing to make the scheduling deterministic, or os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" ## Enable batch invariance to get consistent results regardless of scheduling. os.environ["VLLM_BATCH_INVARIANT"] = "1" prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] sampling_params = SamplingParams(temperature=0.8, top_p=0.95) def main(): llm = LLM(model="facebook/opt-125m") outputs = llm.generate(prompts, sampling_params) print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) # Try generating random numbers outside vLLM # The same number is output across runs, meaning that the random state # in the user code has been updated by vLLM print(random.randint(0, 100)) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/mistral-small.py
examples/offline_inference/mistral-small.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa import argparse from vllm import LLM from vllm.sampling_params import SamplingParams from vllm.assets.image import ImageAsset # This script is an offline demo for running Mistral-Small-3.1 # # If you want to run a server/client setup, please follow this code: # # - Server: # # ```bash # # Mistral format # vllm serve mistralai/Mistral-Small-3.1-24B-Instruct-2503 \ # --tokenizer-mode mistral --config-format mistral --load-format mistral \ # --limit-mm-per-prompt '{"image":4}' --max-model-len 16384 # # # HF format # vllm serve mistralai/Mistral-Small-3.1-24B-Instruct-2503 \ # --limit-mm-per-prompt '{"image":4}' --max-model-len 16384 # ``` # # - Client: # # ```bash # curl --location 'http://<your-node-url>:8000/v1/chat/completions' \ # --header 'Content-Type: application/json' \ # --header 'Authorization: Bearer token' \ # --data '{ # "model": "mistralai/Mistral-Small-3.1-24B-Instruct-2503", # "messages": [ # { # "role": "user", # "content": [ # {"type" : "text", "text": "Describe this image in detail please."}, # {"type": "image_url", "image_url": {"url": "https://s3.amazonaws.com/cms.ipressroom.com/338/files/201808/5b894ee1a138352221103195_A680%7Ejogging-edit/A680%7Ejogging-edit_hero.jpg"}}, # {"type" : "text", "text": "and this one as well. Answer in French."}, # {"type": "image_url", "image_url": {"url": "https://www.wolframcloud.com/obj/resourcesystem/images/a0e/a0ee3983-46c6-4c92-b85d-059044639928/6af8cfb971db031b.png"}} # ] # } # ] # }' # ``` # # Usage: # python demo.py simple # python demo.py advanced # Lower max_model_len and/or max_num_seqs on low-VRAM GPUs. 
# These scripts have been tested on 2x L40 GPUs def run_simple_demo(args: argparse.Namespace): model_name = "mistralai/Mistral-Small-3.1-24B-Instruct-2503" sampling_params = SamplingParams(max_tokens=8192) llm = LLM( model=model_name, tokenizer_mode="mistral" if args.format == "mistral" else "auto", config_format="mistral" if args.format == "mistral" else "auto", load_format="mistral" if args.format == "mistral" else "auto", limit_mm_per_prompt={"image": 1}, max_model_len=4096, max_num_seqs=2, tensor_parallel_size=2, mm_processor_cache_gb=0 if args.disable_mm_processor_cache else 4, ) prompt = "Describe this image in one sentence." messages = [ { "role": "user", "content": [ {"type": "text", "text": prompt}, { "type": "image_pil", "image_pil": ImageAsset("cherry_blossom").pil_image, }, ], }, ] outputs = llm.chat(messages, sampling_params=sampling_params) print("-" * 50) print(outputs[0].outputs[0].text) print("-" * 50) def run_advanced_demo(args: argparse.Namespace): model_name = "mistralai/Mistral-Small-3.1-24B-Instruct-2503" max_img_per_msg = 3 max_tokens_per_img = 4096 sampling_params = SamplingParams(max_tokens=8192, temperature=0.7) llm = LLM( model=model_name, tokenizer_mode="mistral" if args.format == "mistral" else "auto", config_format="mistral" if args.format == "mistral" else "auto", load_format="mistral" if args.format == "mistral" else "auto", limit_mm_per_prompt={"image": max_img_per_msg}, max_model_len=max_img_per_msg * max_tokens_per_img, tensor_parallel_size=2, mm_processor_cache_gb=0 if args.disable_mm_processor_cache else 4, ) prompt = "Describe the following image." 
url_1 = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png" url_2 = "https://picsum.photos/seed/picsum/200/300" url_3 = "https://picsum.photos/id/32/512/512" messages = [ { "role": "user", "content": [ {"type": "text", "text": prompt}, {"type": "image_url", "image_url": {"url": url_1}}, {"type": "image_url", "image_url": {"url": url_2}}, ], }, { "role": "assistant", "content": "The images show nature.", }, { "role": "user", "content": "More details please and answer only in French!.", }, { "role": "user", "content": [ {"type": "image_url", "image_url": {"url": url_3}}, ], }, ] outputs = llm.chat(messages=messages, sampling_params=sampling_params) print("-" * 50) print(outputs[0].outputs[0].text) print("-" * 50) def parse_args(): parser = argparse.ArgumentParser( description="Run a demo in simple or advanced mode." ) parser.add_argument( "mode", choices=["simple", "advanced"], help="Specify the demo mode: 'simple' or 'advanced'", ) parser.add_argument( "--format", choices=["mistral", "hf"], default="mistral", help="Specify the format of the model to load.", ) parser.add_argument( "--disable-mm-processor-cache", action="store_true", help="If True, disables caching of multi-modal processor.", ) return parser.parse_args() def main(): args = parse_args() if args.mode == "simple": print("Running simple demo...") run_simple_demo(args) elif args.mode == "advanced": print("Running advanced demo...") run_advanced_demo(args) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/multilora_inference.py
examples/offline_inference/multilora_inference.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use the multi-LoRA functionality for offline inference. Requires HuggingFace credentials for access to Llama2. """ from huggingface_hub import snapshot_download from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams from vllm.lora.request import LoRARequest def create_test_prompts( lora_path: str, ) -> list[tuple[str, SamplingParams, LoRARequest | None]]: """Create a list of test prompts with their sampling parameters. 2 requests for base model, 4 requests for the LoRA. We define 2 different LoRA adapters (using the same model for demo purposes). Since we also set `max_loras=1`, the expectation is that the requests with the second LoRA adapter will be run after all requests with the first adapter have finished. """ return [ ( "A robot may not injure a human being", SamplingParams(temperature=0.0, logprobs=1, max_tokens=128), None, ), ( "To be or not to be,", SamplingParams( temperature=0.8, top_k=5, presence_penalty=0.2, max_tokens=128 ), None, ), ( "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 SamplingParams(temperature=0.0, logprobs=1, max_tokens=128), LoRARequest("sql-lora", 1, lora_path), ), ( "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 SamplingParams(temperature=0.0, logprobs=1, max_tokens=128), LoRARequest("sql-lora2", 2, lora_path), ), ] def process_requests( engine: LLMEngine, test_prompts: list[tuple[str, SamplingParams, LoRARequest | None]], ): """Continuously process a list of prompts and handle the outputs.""" 
request_id = 0 print("-" * 50) while test_prompts or engine.has_unfinished_requests(): if test_prompts: prompt, sampling_params, lora_request = test_prompts.pop(0) engine.add_request( str(request_id), prompt, sampling_params, lora_request=lora_request ) request_id += 1 request_outputs: list[RequestOutput] = engine.step() for request_output in request_outputs: if request_output.finished: print(request_output) print("-" * 50) def initialize_engine() -> LLMEngine: """Initialize the LLMEngine.""" # max_loras: controls the number of LoRAs that can be used in the same # batch. Larger numbers will cause higher memory usage, as each LoRA # slot requires its own preallocated tensor. # max_lora_rank: controls the maximum supported rank of all LoRAs. Larger # numbers will cause higher memory usage. If you know that all LoRAs will # use the same rank, it is recommended to set this as low as possible. # max_cpu_loras: controls the size of the CPU LoRA cache. engine_args = EngineArgs( model="meta-llama/Llama-3.2-3B-Instruct", enable_lora=True, max_loras=1, max_lora_rank=8, max_cpu_loras=2, max_num_seqs=256, ) return LLMEngine.from_engine_args(engine_args) def main(): """Main function that sets up and runs the prompt processing.""" engine = initialize_engine() lora_path = snapshot_download(repo_id="jeeejeee/llama32-3b-text2sql-spider") test_prompts = create_test_prompts(lora_path) process_requests(engine, test_prompts) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/disaggregated_prefill.py
examples/offline_inference/disaggregated_prefill.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file demonstrates the example usage of disaggregated prefilling We will launch 2 vllm instances (GPU 0 for prefill and GPU 1 for decode), and then transfer the KV cache between them. """ import os import time from multiprocessing import Event, Process from vllm import LLM, SamplingParams from vllm.config import KVTransferConfig def run_prefill(prefill_done): # We use GPU 0 for prefill node. os.environ["CUDA_VISIBLE_DEVICES"] = "0" # The prefill node receives two requests, while the decode node receives # three requests. So the decode node will only receive the KV Cache for # requests 1 and 3. The decode node will use the KV Cache of requests 1 # and 3 and do prefilling on request 2. prompts = [ "Hello, my name is", "Hi, your name is", # The decode node will actually "prefill" this request. "Tell me a very long story", ] sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=1) # Using P2pNcclConnector to transmit KV caches between vLLM instances. # This instance is the prefill node (kv_producer, rank 0). # The number of parallel instances for KV cache transfer is set to 2, # as required for P2pNcclConnector. ktc = KVTransferConfig( kv_connector="P2pNcclConnector", kv_role="kv_producer", kv_rank=0, kv_parallel_size=2, ) # Set GPU memory utilization to 0.8 for an A6000 GPU with 40GB # memory. You may need to adjust the value to fit your GPU. llm = LLM( model="meta-llama/Meta-Llama-3.1-8B-Instruct", kv_transfer_config=ktc, max_model_len=2000, gpu_memory_utilization=0.8, ) llm.generate(prompts, sampling_params) print("Prefill node is finished.") prefill_done.set() # To keep the prefill node running in case the decode node is not done; # otherwise, the script might exit prematurely, causing incomplete decoding. 
try: while True: time.sleep(1) except KeyboardInterrupt: print("Script stopped by user.") def run_decode(prefill_done): # We use GPU 1 for decode node. os.environ["CUDA_VISIBLE_DEVICES"] = "1" prompts = [ "Hello, my name is", "Hi, your name is", "Tell me a very long story", ] sampling_params = SamplingParams(temperature=0, top_p=0.95) # Using P2pNcclConnector to transmit KV caches between vLLM instances. # This instance is the decode node (kv_consumer, rank 1). # The number of parallel instances for KV cache transfer is set to 2, # as required for P2pNcclConnector. ktc = KVTransferConfig( kv_connector="P2pNcclConnector", kv_role="kv_consumer", kv_rank=1, kv_parallel_size=2, ) # Set GPU memory utilization to 0.8 for an A6000 GPU with 40GB # memory. You may need to adjust the value to fit your GPU. llm = LLM( model="meta-llama/Meta-Llama-3.1-8B-Instruct", kv_transfer_config=ktc, max_model_len=2000, gpu_memory_utilization=0.8, ) # Wait for the producer to start the pipe print("Waiting for prefill node to finish...") prefill_done.wait() # At this point when the prefill_done is set, the kv-cache should have been # transferred to this decode node, so we can start decoding. outputs = llm.generate(prompts, sampling_params) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") def main(): prefill_done = Event() prefill_process = Process(target=run_prefill, args=(prefill_done,)) decode_process = Process(target=run_decode, args=(prefill_done,)) # Start prefill node prefill_process.start() # Start decode node decode_process.start() # Terminate the prefill node when decode is finished decode_process.join() prefill_process.terminate() if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/load_sharded_state.py
examples/offline_inference/load_sharded_state.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Validates the loading of a model saved with the sharded_state format. This script demonstrates how to load a model that was previously saved using save_sharded_state.py and validates it by running inference. Example usage: (First need to save a sharded_state mode) python save_sharded_state.py \ --model /path/to/load \ --quantization deepspeedfp \ --tensor-parallel-size 8 \ --output /path/to/save/sharded/model python load_sharded_state.py \ --model /path/to/saved/sharded/model \ --load-format sharded_state \ --quantization deepspeedfp \ --tensor-parallel-size 8 \ --prompt "Hello, my name is" \ --max-tokens 50 """ import dataclasses from vllm import LLM, EngineArgs, SamplingParams from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() # Add engine arguments EngineArgs.add_cli_args(parser) # Override default load_format for clarity parser.set_defaults(load_format="sharded_state") # Add validation arguments parser.add_argument( "--prompt", type=str, default="Hello, world!", help="Prompt for validation" ) parser.add_argument( "--max-tokens", type=int, default=100, help="Maximum number of tokens to generate", ) parser.add_argument( "--temperature", type=float, default=0.7, help="Sampling temperature" ) parser.add_argument( "--top-p", type=float, default=1.0, help="Top-p sampling parameter" ) return parser.parse_args() def main(): args = parse_args() engine_args = EngineArgs.from_cli_args(args) print( f"Loading model from {engine_args.model} using format {engine_args.load_format}" ) print(f"Tensor parallel size: {engine_args.tensor_parallel_size}") # Load the model using engine args llm = LLM(**dataclasses.asdict(engine_args)) # Prepare sampling parameters sampling_params = SamplingParams( temperature=args.temperature, top_p=args.top_p, max_tokens=args.max_tokens, ) print("\nRunning inference:") 
print(f"Prompt: {args.prompt}") # Generate completion outputs = llm.generate(args.prompt, sampling_params) # Display generated text print("\nGenerated outputs:") for output in outputs: generated_text = output.outputs[0].text print("-" * 50) print(f"Full output: {args.prompt}{generated_text}") print("-" * 50) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/lora_with_quantization_inference.py
examples/offline_inference/lora_with_quantization_inference.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use LoRA with different quantization techniques for offline inference. Requires HuggingFace credentials for access. """ import gc import torch from huggingface_hub import snapshot_download from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams from vllm.lora.request import LoRARequest def create_test_prompts( lora_path: str, ) -> list[tuple[str, SamplingParams, LoRARequest | None]]: return [ # this is an example of using quantization without LoRA ( "My name is", SamplingParams(temperature=0.0, logprobs=1, max_tokens=128), None, ), # the next three examples use quantization with LoRA ( "my name is", SamplingParams(temperature=0.0, logprobs=1, max_tokens=128), LoRARequest("lora-test-1", 1, lora_path), ), ( "The capital of USA is", SamplingParams(temperature=0.0, logprobs=1, max_tokens=128), LoRARequest("lora-test-2", 1, lora_path), ), ( "The capital of France is", SamplingParams(temperature=0.0, logprobs=1, max_tokens=128), LoRARequest("lora-test-3", 1, lora_path), ), ] def process_requests( engine: LLMEngine, test_prompts: list[tuple[str, SamplingParams, LoRARequest | None]], ): """Continuously process a list of prompts and handle the outputs.""" request_id = 0 while test_prompts or engine.has_unfinished_requests(): if test_prompts: prompt, sampling_params, lora_request = test_prompts.pop(0) engine.add_request( str(request_id), prompt, sampling_params, lora_request=lora_request ) request_id += 1 request_outputs: list[RequestOutput] = engine.step() for request_output in request_outputs: if request_output.finished: print("----------------------------------------------------") print(f"Prompt: {request_output.prompt}") print(f"Output: {request_output.outputs[0].text}") def initialize_engine( model: str, quantization: str, lora_repo: str | None ) -> LLMEngine: """Initialize the LLMEngine.""" engine_args = EngineArgs( 
model=model, quantization=quantization, enable_lora=True, max_lora_rank=64, max_loras=4, ) return LLMEngine.from_engine_args(engine_args) def main(): """Main function that sets up and runs the prompt processing.""" test_configs = [ # QLoRA (https://arxiv.org/abs/2305.14314) { "name": "qlora_inference_example", "model": "huggyllama/llama-7b", "quantization": "bitsandbytes", "lora_repo": "timdettmers/qlora-flan-7b", }, { "name": "AWQ_inference_with_lora_example", "model": "TheBloke/TinyLlama-1.1B-Chat-v0.3-AWQ", "quantization": "awq", "lora_repo": "jashing/tinyllama-colorist-lora", }, { "name": "GPTQ_inference_with_lora_example", "model": "TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ", "quantization": "gptq", "lora_repo": "jashing/tinyllama-colorist-lora", }, ] for test_config in test_configs: print(f"~~~~~~~~~~~~~~~~ Running: {test_config['name']} ~~~~~~~~~~~~~~~~") engine = initialize_engine( test_config["model"], test_config["quantization"], test_config["lora_repo"] ) lora_path = snapshot_download(repo_id=test_config["lora_repo"]) test_prompts = create_test_prompts(lora_path) process_requests(engine, test_prompts) # Clean up the GPU memory for the next test del engine gc.collect() torch.cuda.empty_cache() if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/rlhf_online_quant.py
examples/offline_inference/rlhf_online_quant.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Demonstrates reinforcement learning from human feedback (RLHF) using vLLM and Ray. The script separates training and inference workloads onto distinct GPUs so that Ray can manage process placement and inter-process communication. A Hugging Face Transformer model occupies GPU 0 for training, whereas a tensor-parallel vLLM inference engine occupies GPU 1–2. The example performs the following steps: * Load the training model on GPU 0. * Split the inference model across GPUs 1–2 using vLLM's tensor parallelism and Ray placement groups. * Generate text from a list of prompts using the inference engine. * Update the weights of the training model and broadcast the updated weights to the inference engine by using a Ray collective RPC group. Note that for demonstration purposes we simply zero out the weights. For a production-ready implementation that supports multiple training and inference replicas, see the OpenRLHF framework: https://github.com/OpenRLHF/OpenRLHF This example assumes a single-node cluster with three GPUs, but Ray supports multi-node clusters. vLLM expects the GPUs are only used for vLLM workloads. Residual GPU activity interferes with vLLM memory profiling and causes unexpected behavior. 
""" import json import os import ray import torch from ray.util.placement_group import placement_group from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy from rlhf_utils import stateless_init_process_group from torchao.core.config import config_to_dict from torchao.quantization import ( Float8DynamicActivationFloat8WeightConfig, PerRow, ) from transformers import AutoModelForCausalLM from vllm import LLM, SamplingParams from vllm.utils.network_utils import get_ip, get_open_port class MyLLM(LLM): """Configure the vLLM worker for Ray placement group execution.""" def __init__(self, *args, **kwargs): # Remove the top-level CUDA_VISIBLE_DEVICES variable set by Ray # so that vLLM can manage its own device placement within the worker. os.environ.pop("CUDA_VISIBLE_DEVICES", None) super().__init__(*args, **kwargs) # Load the OPT-125M model onto GPU 0 for the training workload. train_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") train_model.to("cuda:0") # Initialize Ray and set the visible devices. The vLLM engine will # be placed on GPUs 1 and 2. os.environ["CUDA_VISIBLE_DEVICES"] = "1,2" ray.init() # Create a placement group that reserves GPU 1–2 for the vLLM inference engine. # Learn more about Ray placement groups: # https://docs.ray.io/en/latest/ray-core/scheduling/placement-group.html pg_inference = placement_group([{"GPU": 1, "CPU": 0}] * 2) ray.get(pg_inference.ready()) scheduling_inference = PlacementGroupSchedulingStrategy( placement_group=pg_inference, placement_group_capture_child_tasks=True, placement_group_bundle_index=0, ) # Launch the vLLM inference engine. The `enforce_eager` flag reduces # start-up latency. 
# generate torchao quantization config for RL rollout # see https://github.com/vllm-project/vllm/pull/23014 for instructions to # use serialized config files instead of passing around json string config = Float8DynamicActivationFloat8WeightConfig(granularity=PerRow()) json_str = json.dumps(config_to_dict(config)) llm = ray.remote( num_cpus=0, num_gpus=0, scheduling_strategy=scheduling_inference, )(MyLLM).remote( model="facebook/opt-125m", hf_overrides={"quantization_config_dict_json": json_str}, enforce_eager=True, worker_extension_cls="rlhf_utils.WorkerExtension", tensor_parallel_size=2, distributed_executor_backend="ray", ) # Generate text from the prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] sampling_params = SamplingParams(temperature=0) outputs = ray.get(llm.generate.remote(prompts, sampling_params)) print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) # Set up the communication channel between the training process and the # inference engine. master_address = get_ip() master_port = get_open_port() handle = llm.collective_rpc.remote( "init_weight_update_group", args=(master_address, master_port, 1, 3) ) model_update_group = stateless_init_process_group( master_address, master_port, 0, 3, torch.device("cuda:0") ) ray.get(handle) # Simulate a training step by zeroing out all model weights. # In a real RLHF training loop the weights would be updated using the gradient # from an RL objective such as PPO on a reward model. for name, p in train_model.named_parameters(): p.data.zero_() # Synchronize the updated weights to the inference engine. 
for name, p in train_model.named_parameters(): dtype_name = str(p.dtype).split(".")[-1] handle = llm.collective_rpc.remote( "update_weight", args=(name, dtype_name, p.shape) ) model_update_group.broadcast(p, src=0, stream=torch.cuda.current_stream()) ray.get(handle) # Verify that the inference weights have been updated. assert all(ray.get(llm.collective_rpc.remote("check_weights_changed"))) # Generate text with the updated model. The output is expected to be nonsense # because the weights are zero. outputs_updated = ray.get(llm.generate.remote(prompts, sampling_params)) print("-" * 50) for output in outputs_updated: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/async_llm_streaming.py
examples/offline_inference/async_llm_streaming.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Simple example demonstrating streaming offline inference with AsyncLLM (V1 engine). This script shows the core functionality of vLLM's AsyncLLM engine for streaming token-by-token output in offline inference scenarios. It demonstrates DELTA mode streaming where you receive new tokens as they are generated. Usage: python examples/offline_inference/async_llm_streaming.py """ import asyncio from vllm import SamplingParams from vllm.engine.arg_utils import AsyncEngineArgs from vllm.sampling_params import RequestOutputKind from vllm.v1.engine.async_llm import AsyncLLM async def stream_response(engine: AsyncLLM, prompt: str, request_id: str) -> None: """ Stream response from AsyncLLM and display tokens as they arrive. This function demonstrates the core streaming pattern: 1. Create SamplingParams with DELTA output kind 2. Call engine.generate() and iterate over the async generator 3. Print new tokens as they arrive 4. 
Handle the finished flag to know when generation is complete """ print(f"\n🚀 Prompt: {prompt!r}") print("💬 Response: ", end="", flush=True) # Configure sampling parameters for streaming sampling_params = SamplingParams( max_tokens=100, temperature=0.8, top_p=0.95, seed=42, # For reproducible results output_kind=RequestOutputKind.DELTA, # Get only new tokens each iteration ) try: # Stream tokens from AsyncLLM async for output in engine.generate( request_id=request_id, prompt=prompt, sampling_params=sampling_params ): # Process each completion in the output for completion in output.outputs: # In DELTA mode, we get only new tokens generated since last iteration new_text = completion.text if new_text: print(new_text, end="", flush=True) # Check if generation is finished if output.finished: print("\n✅ Generation complete!") break except Exception as e: print(f"\n❌ Error during streaming: {e}") raise async def main(): print("🔧 Initializing AsyncLLM...") # Create AsyncLLM engine with simple configuration engine_args = AsyncEngineArgs( model="meta-llama/Llama-3.2-1B-Instruct", enforce_eager=True, # Faster startup for examples ) engine = AsyncLLM.from_engine_args(engine_args) try: # Example prompts to demonstrate streaming prompts = [ "The future of artificial intelligence is", "In a galaxy far, far away", "The key to happiness is", ] print(f"🎯 Running {len(prompts)} streaming examples...") # Process each prompt for i, prompt in enumerate(prompts, 1): print(f"\n{'=' * 60}") print(f"Example {i}/{len(prompts)}") print(f"{'=' * 60}") request_id = f"stream-example-{i}" await stream_response(engine, prompt, request_id) # Brief pause between examples if i < len(prompts): await asyncio.sleep(0.5) print("\n🎉 All streaming examples completed!") finally: # Always clean up the engine print("🔧 Shutting down engine...") engine.shutdown() if __name__ == "__main__": try: asyncio.run(main()) except KeyboardInterrupt: print("\n🛑 Interrupted by user")
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/qwen3_omni/only_thinker.py
examples/offline_inference/qwen3_omni/only_thinker.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This example shows how to use vLLM for running offline inference with the correct prompt format on Qwen2.5-Omni (thinker only). """ from typing import NamedTuple from vllm import LLM, SamplingParams from vllm.assets.audio import AudioAsset from vllm.assets.image import ImageAsset from vllm.assets.video import VideoAsset from vllm.multimodal.image import convert_image_mode from vllm.utils.argparse_utils import FlexibleArgumentParser class QueryResult(NamedTuple): inputs: dict limit_mm_per_prompt: dict[str, int] # NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on # lower-end GPUs. # Unless specified, these settings have been tested to work on a single L4. default_system = ( "You are Qwen, a virtual human developed by the Qwen Team, Alibaba " "Group, capable of perceiving auditory and visual inputs, as well as " "generating text and speech." ) def get_mixed_modalities_query() -> QueryResult: question = ( "What is recited in the audio? " "What is the content of this image? Why is this video funny?" ) prompt = ( f"<|im_start|>system\n{default_system}<|im_end|>\n" "<|im_start|>user\n<|audio_start|><|audio_pad|><|audio_end|>" "<|vision_start|><|image_pad|><|vision_end|>" "<|vision_start|><|video_pad|><|vision_end|>" f"{question}<|im_end|>\n" f"<|im_start|>assistant\n" ) return QueryResult( inputs={ "prompt": prompt, "multi_modal_data": { "audio": AudioAsset("mary_had_lamb").audio_and_sample_rate, "image": convert_image_mode( ImageAsset("cherry_blossom").pil_image, "RGB" ), "video": VideoAsset(name="baby_reading", num_frames=16).np_ndarrays, }, }, limit_mm_per_prompt={"audio": 1, "image": 1, "video": 1}, ) def get_use_audio_in_video_query() -> QueryResult: question = ( "Describe the content of the video in details, then convert what the " "baby say into text." 
) prompt = ( f"<|im_start|>system\n{default_system}<|im_end|>\n" "<|im_start|>user\n<|vision_start|><|video_pad|><|vision_end|>" f"{question}<|im_end|>\n" f"<|im_start|>assistant\n" ) asset = VideoAsset(name="baby_reading", num_frames=16) audio = asset.get_audio(sampling_rate=16000) return QueryResult( inputs={ "prompt": prompt, "multi_modal_data": { "video": asset.np_ndarrays, "audio": audio, }, "mm_processor_kwargs": { "use_audio_in_video": True, }, }, limit_mm_per_prompt={"audio": 1, "video": 1}, ) def get_multi_audios_query() -> QueryResult: question = "Are these two audio clips the same?" prompt = ( f"<|im_start|>system\n{default_system}<|im_end|>\n" "<|im_start|>user\n<|audio_start|><|audio_pad|><|audio_end|>" "<|audio_start|><|audio_pad|><|audio_end|>" f"{question}<|im_end|>\n" f"<|im_start|>assistant\n" ) return QueryResult( inputs={ "prompt": prompt, "multi_modal_data": { "audio": [ AudioAsset("winning_call").audio_and_sample_rate, AudioAsset("mary_had_lamb").audio_and_sample_rate, ], }, }, limit_mm_per_prompt={ "audio": 2, }, ) query_map = { "mixed_modalities": get_mixed_modalities_query, "use_audio_in_video": get_use_audio_in_video_query, "multi_audios": get_multi_audios_query, } def main(args): model_name = "Qwen/Qwen3-Omni-30B-A3B-Instruct" query_result = query_map[args.query_type]() llm = LLM( model=model_name, max_model_len=12800, max_num_seqs=5, limit_mm_per_prompt=query_result.limit_mm_per_prompt, seed=args.seed, ) # We set temperature to 0.2 so that outputs can be different # even when all prompts are identical when running batch inference. 
sampling_params = SamplingParams(temperature=0.2, max_tokens=256) outputs = llm.generate(query_result.inputs, sampling_params=sampling_params) for o in outputs: generated_text = o.outputs[0].text print(generated_text) def parse_args(): parser = FlexibleArgumentParser( description="Demo on using vLLM for offline inference with " "audio language models" ) parser.add_argument( "--query-type", "-q", type=str, default="mixed_modalities", choices=query_map.keys(), help="Query type.", ) parser.add_argument( "--seed", type=int, default=0, help="Set the seed when initializing `vllm.LLM`.", ) return parser.parse_args() if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/logits_processor/custom_req.py
examples/offline_inference/logits_processor/custom_req.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """This example demonstrates wrapping a request-level logits processor to be compatible with vLLM's batch-level logits processing For demo purposes, a dummy logits processor is employed which, if `target_token` is passed as a keyword argument to `SamplingParams.extra_args`, will mask out all tokens except `target_token`. This logits processor can be applied to a vector of logits associated with a single decode step for a single request. The logits processor cannot be applied to a request which does not pass in a `target_token` custom argument. The request-level dummy logits processor is wrapped to create a batch-level logits processor, which can apply the logits processor to output logits from all requests in the persistent batch in a given decode step. For requests which do not provide a `target_token` argument, the corresponding row of `logits` will not be modified. A batch is constructed with `temperature=0.0` and 50% of requests specifying `target_token`, and for these requests - and *only* these requests - we expect the `target_token` to be decoded in each step, yielding an output similar to that shown below: Generated Outputs: ------------------------------------------------------------ Prompt: 'Hello, my name is' Output: " ' ' ' ' ' ' ' ' ' ' ' ' ' ' ' '" ------------------------------------------------------------ Prompt: 'The president of the United States is' Output: " not a racist. 
He is a racist.\nHe's a racist because he" ------------------------------------------------------------ Prompt: 'The capital of France is' Output: ' also also also also also also also also also also also also also also also also' ------------------------------------------------------------ Prompt: 'The future of AI is' Output: ' in the hands of the people.\n\nThe future of AI is in the' ------------------------------------------------------------ """ from typing import Any import torch from vllm import LLM, SamplingParams from vllm.logger import init_logger from vllm.v1.sample.logits_processor import ( AdapterLogitsProcessor, RequestLogitsProcessor, ) logger = init_logger(__name__) class DummyPerReqLogitsProcessor: """The request-level logits processor masks out all logits except the token id identified by `target_token`""" def __init__(self, target_token: int) -> None: """Specify `target_token`""" self.target_token = target_token def __call__( self, output_ids: list[int], logits: torch.Tensor, ) -> torch.Tensor: val_to_keep = logits[self.target_token].item() logits[:] = float("-inf") logits[self.target_token] = val_to_keep return logits class WrappedPerReqLogitsProcessor(AdapterLogitsProcessor): """Example of wrapping a fake request-level logit processor to create a batch-level logits processor""" @classmethod def validate_params(cls, params: SamplingParams): target_token: Any | None = params.extra_args and params.extra_args.get( "target_token" ) if target_token is not None and not isinstance(target_token, int): raise ValueError(f"target_token value {target_token} is not int") def is_argmax_invariant(self) -> bool: return False def new_req_logits_processor( self, params: SamplingParams, ) -> RequestLogitsProcessor | None: """This method returns a new request-level logits processor, customized to the `target_token` value associated with a particular request. Returns None if the logits processor should not be applied to the particular request. 
To use the logits processor the request must have a "target_token" custom argument with an integer value. Args: params: per-request sampling params Returns: `Callable` request logits processor, or None """ target_token: Any | None = params.extra_args and params.extra_args.get( "target_token" ) if target_token is None: return None return DummyPerReqLogitsProcessor(target_token) # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create a mixture of requests which do and don't utilize the dummy logitproc sampling_params_list = [ SamplingParams(temperature=0.0, extra_args={"target_token": 128}), SamplingParams(temperature=0.0), SamplingParams(temperature=0.0, extra_args={"target_token": 67}), SamplingParams(temperature=0.0), ] def main(): # Create an LLM. llm = LLM( model="facebook/opt-125m", logits_processors=[WrappedPerReqLogitsProcessor], ) # Generate texts from the prompts. # The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params_list) # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}") print(f"Output: {generated_text!r}") print("-" * 60) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/logits_processor/custom_req_init.py
examples/offline_inference/logits_processor/custom_req_init.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """This example demonstrates a special case of wrapping a request-level logits processor, namely the case where it is necessary to utilize engine config or environment info passed to the constructor. The subclass must override the wrapper base class `__init__()` method to access the engine config, the device identifier, or the flag which indicates whether pinned memory is available. For demo purposes, a request-level dummy logits processor is employed which causes the same token (`target_token`) to be decoded in each step. The request-level dummy logits processor is wrapped to create a batch-level logits processor, which can apply the logits processor to output logits from all requests in the persistent batch in a given decode step. The wrapped dummy logits processor below models a scenario where we must disable the logits processor on non-"cuda" platforms. The wrapper base class `__init__()` is overridden in order to check this condition and set a flag. A batch is constructed with `temperature=0.0` and 50% of requests specifying `target_token`, and for these requests - and *only* these requests - we expect that on a "cuda" device the output will look something like: Generated Outputs: ------------------------------------------------------------ Prompt: 'Hello, my name is' Output: " ' ' ' ' ' ' ' ' ' ' ' ' ' ' ' '" ------------------------------------------------------------ Prompt: 'The president of the United States is' Output: " not a racist. 
He is a racist.\nHe's a racist because he" ------------------------------------------------------------ Prompt: 'The capital of France is' Output: ' also also also also also also also also also also also also also also also also' ------------------------------------------------------------ Prompt: 'The future of AI is' Output: ' in the hands of the people.\n\nThe future of AI is in the' ------------------------------------------------------------ which indicates that the logits processor is running. However, on a non-"cuda" device, the first and third requests would not repeat the same token. """ import torch from vllm import LLM, SamplingParams from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.v1.sample.logits_processor import ( AdapterLogitsProcessor, RequestLogitsProcessor, ) logger = init_logger(__name__) class DummyPerReqLogitsProcessor: """The request-level logits processor masks out all logits except the token id identified by `target_token`""" def __init__(self, target_token: int) -> None: """Specify `target_token`""" self.target_token = target_token def __call__( self, output_ids: list[int], logits: torch.Tensor, ) -> torch.Tensor: val_to_keep = logits[self.target_token].item() logits[:] = float("-inf") logits[self.target_token] = val_to_keep return logits class WrappedPerReqLogitsProcessor(AdapterLogitsProcessor): """Example of overriding the wrapper class `__init__()` in order to utilize info about the device type""" @classmethod def validate_params(cls, params: SamplingParams): target_token = params.extra_args and params.extra_args.get("target_token") if target_token is not None and not isinstance(target_token, int): raise ValueError( f"`target_token` has to be an integer, got {target_token}." 
) def __init__( self, vllm_config: VllmConfig, device: torch.device, is_pin_memory: bool ): super().__init__(vllm_config, device, is_pin_memory) self.is_cuda = device.type == "cuda" def is_argmax_invariant(self) -> bool: return False def new_req_logits_processor( self, params: SamplingParams, ) -> RequestLogitsProcessor | None: """This method returns a new request-level logits processor, customized to the `target_token` value associated with a particular request. Returns None if the logits processor should not be applied to the particular request. To use the logits processor the request must have a "target_token" custom argument with an integer value, and the device must be "cuda"-type Args: params: per-request sampling params Returns: `Callable` request logits processor, or None """ if ( not self.is_cuda or ( target_token := params.extra_args and params.extra_args.get("target_token") ) is None ): return None return DummyPerReqLogitsProcessor(target_token) # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create a mixture of requests which do and don't utilize the dummy logitproc sampling_params_list = [ SamplingParams(temperature=0.0, extra_args={"target_token": 128}), SamplingParams(temperature=0.0), SamplingParams(temperature=0.0, extra_args={"target_token": 67}), SamplingParams(temperature=0.0), ] def main(): # Create an LLM. llm = LLM( model="facebook/opt-125m", logits_processors=[WrappedPerReqLogitsProcessor], ) # Generate texts from the prompts. # The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params_list) # Print the outputs. 
print("\nGenerated Outputs:\n" + "-" * 60) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}") print(f"Output: {generated_text!r}") print("-" * 60) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/logits_processor/custom.py
examples/offline_inference/logits_processor/custom.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Offline-inference example: registering a custom logits processor with vLLM.

A dummy processor is installed which, whenever a request supplies an integer
``target_token`` through ``SamplingParams.extra_args``, masks every logit
except that token. Combined with ``temperature=0.0``, such requests greedily
decode the same token at every step, while requests without ``target_token``
decode normally — so the processor's effect is directly visible in the output.
"""

from typing import Any

import torch

from vllm import LLM, SamplingParams
from vllm.config import VllmConfig
from vllm.v1.sample.logits_processor import (
    BatchUpdate,
    LogitsProcessor,
)
from vllm.v1.sample.logits_processor.builtin import process_dict_updates


# Hypothetical custom logits processor
class DummyLogitsProcessor(LogitsProcessor):
    """Fake logit processor to support unit testing and examples"""

    @classmethod
    def validate_params(cls, params: SamplingParams):
        # `extra_args` may be None; short-circuit `and` keeps the lookup safe.
        extra = params.extra_args
        target_token: Any | None = extra and extra.get("target_token")
        if target_token is not None and not isinstance(target_token, int):
            raise ValueError(
                f"target_token value {target_token} {type(target_token)} is not int"
            )

    def __init__(
        self, vllm_config: VllmConfig, device: torch.device, is_pin_memory: bool
    ):
        # Maps persistent-batch row index -> token id forced for that request.
        self.req_info: dict[int, int] = {}

    def is_argmax_invariant(self) -> bool:
        # Masking changes which token is the argmax, so not invariant.
        return False

    def update_state(self, batch_update: BatchUpdate | None):
        def _target_for(params: SamplingParams) -> int | None:
            # Raises for non-int target_token values.
            self.validate_params(params)
            return params.extra_args and params.extra_args.get("target_token")

        # Keeps self.req_info in sync with the persistent batch; the callback
        # supplies the per-request state, or None when this processor does
        # not apply to the request.
        process_dict_updates(
            self.req_info, batch_update, lambda params, _, __: _target_for(params)
        )

    def apply(self, logits: torch.Tensor) -> torch.Tensor:
        # Fast path: no request in the batch opted in.
        if not self.req_info:
            return logits

        device = logits.device
        row_idx = torch.tensor(
            list(self.req_info.keys()), dtype=torch.long, device=device
        )
        col_idx = torch.tensor(
            list(self.req_info.values()), dtype=torch.long, device=device
        )

        # Remember each target logit, wipe the affected rows, then restore it.
        kept = logits[row_idx, col_idx].clone()
        logits[row_idx] = float("-inf")
        logits[row_idx, col_idx] = kept
        return logits


# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]

# Half of the requests opt into the dummy logits processor, half do not.
sampling_params_list = [
    SamplingParams(temperature=0.0, extra_args={"target_token": 128}),
    SamplingParams(temperature=0.0),
    SamplingParams(temperature=0.0, extra_args={"target_token": 67}),
    SamplingParams(temperature=0.0),
]


def main():
    """Run the batch with the custom processor and print each completion."""
    llm = LLM(
        model="facebook/opt-125m",
        logits_processors=[DummyLogitsProcessor],
    )

    # One RequestOutput per prompt, paired with its SamplingParams above.
    outputs = llm.generate(prompts, sampling_params_list)

    print("\nGenerated Outputs:\n" + "-" * 60)
    for request_output in outputs:
        print(f"Prompt: {request_output.prompt!r}")
        print(f"Output: {request_output.outputs[0].text!r}")
        print("-" * 60)


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/basic/embed.py
examples/offline_inference/basic/embed.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from argparse import Namespace from vllm import LLM, EngineArgs from vllm.attention.backends.registry import AttentionBackendEnum from vllm.config import AttentionConfig from vllm.platforms import current_platform from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() parser = EngineArgs.add_cli_args(parser) # Set example specific arguments parser.set_defaults( model="intfloat/e5-small", runner="pooling", enforce_eager=True, ) return parser.parse_args() def main(args: Namespace): if current_platform.is_rocm(): args.attention_config = AttentionConfig( backend=AttentionBackendEnum.FLEX_ATTENTION ) # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create an LLM. # You should pass runner="pooling" for embedding models llm = LLM(**vars(args)) # Generate embedding. The output is a list of EmbeddingRequestOutputs. outputs = llm.embed(prompts) # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for prompt, output in zip(prompts, outputs): embeds = output.outputs.embedding embeds_trimmed = ( (str(embeds[:16])[:-1] + ", ...]") if len(embeds) > 16 else embeds ) print(f"Prompt: {prompt!r} \nEmbeddings: {embeds_trimmed} (size={len(embeds)})") print("-" * 60) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/basic/chat.py
examples/offline_inference/basic/chat.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Offline chat example: drive LLM.chat() with single and batched conversations."""

from vllm import LLM, EngineArgs
from vllm.utils.argparse_utils import FlexibleArgumentParser


def create_parser():
    """Return a parser exposing engine args, sampling knobs, and example flags."""
    parser = FlexibleArgumentParser()
    # Engine arguments (model, parallelism, ...).
    EngineArgs.add_cli_args(parser)
    parser.set_defaults(model="meta-llama/Llama-3.2-1B-Instruct")
    # Sampling arguments, grouped for --help readability.
    sampling_group = parser.add_argument_group("Sampling parameters")
    sampling_group.add_argument("--max-tokens", type=int)
    sampling_group.add_argument("--temperature", type=float)
    sampling_group.add_argument("--top-p", type=float)
    sampling_group.add_argument("--top-k", type=int)
    # Example-specific arguments.
    parser.add_argument("--chat-template-path", type=str)
    return parser


def main(args: dict):
    """Run single, batched, and custom-template chat inference."""
    # These options are consumed here rather than by the LLM constructor.
    sampling_overrides = {
        name: args.pop(name)
        for name in ("max_tokens", "temperature", "top_p", "top_k")
    }
    chat_template_path = args.pop("chat_template_path")

    llm = LLM(**args)

    # Start from the model's defaults, then apply any CLI overrides.
    sampling_params = llm.get_default_sampling_params()
    for name, value in sampling_overrides.items():
        if value is not None:
            setattr(sampling_params, name, value)

    def print_outputs(outputs):
        print("\nGenerated Outputs:\n" + "-" * 80)
        for output in outputs:
            print(f"Prompt: {output.prompt!r}\n")
            print(f"Generated text: {output.outputs[0].text!r}")
            print("-" * 80)

    print("=" * 80)

    # In this script, we demonstrate how to pass input to the chat method:
    conversation = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hello! How can I assist you today?"},
        {
            "role": "user",
            "content": "Write an essay about the importance of higher education.",
        },
    ]
    outputs = llm.chat(conversation, sampling_params, use_tqdm=False)
    print_outputs(outputs)

    # Batched inference via llm.chat: a list of conversations, with the tqdm
    # progress bar enabled to show that it is indeed running as a batch.
    conversations = [conversation for _ in range(10)]
    outputs = llm.chat(conversations, sampling_params, use_tqdm=True)
    print_outputs(outputs)

    # A chat template can optionally be supplied; otherwise the model's
    # default chat template is used.
    if chat_template_path is not None:
        with open(chat_template_path) as f:
            chat_template = f.read()
        outputs = llm.chat(
            conversations,
            sampling_params,
            use_tqdm=False,
            chat_template=chat_template,
        )
        print_outputs(outputs)


if __name__ == "__main__":
    parser = create_parser()
    args: dict = vars(parser.parse_args())
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/basic/classify.py
examples/offline_inference/basic/classify.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from argparse import Namespace from vllm import LLM, EngineArgs from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() parser = EngineArgs.add_cli_args(parser) # Set example specific arguments parser.set_defaults( model="jason9693/Qwen2.5-1.5B-apeach", runner="pooling", enforce_eager=True, ) return parser.parse_args() def main(args: Namespace): # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create an LLM. # You should pass runner="pooling" for classification models llm = LLM(**vars(args)) # Generate logits. The output is a list of ClassificationRequestOutputs. outputs = llm.classify(prompts) # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for prompt, output in zip(prompts, outputs): probs = output.outputs.probs probs_trimmed = (str(probs[:16])[:-1] + ", ...]") if len(probs) > 16 else probs print( f"Prompt: {prompt!r} \n" f"Class Probabilities: {probs_trimmed} (size={len(probs)})" ) print("-" * 60) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/basic/generate.py
examples/offline_inference/basic/generate.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, EngineArgs from vllm.utils.argparse_utils import FlexibleArgumentParser def create_parser(): parser = FlexibleArgumentParser() # Add engine args EngineArgs.add_cli_args(parser) parser.set_defaults(model="meta-llama/Llama-3.2-1B-Instruct") # Add sampling params sampling_group = parser.add_argument_group("Sampling parameters") sampling_group.add_argument("--max-tokens", type=int) sampling_group.add_argument("--temperature", type=float) sampling_group.add_argument("--top-p", type=float) sampling_group.add_argument("--top-k", type=int) return parser def main(args: dict): # Pop arguments not used by LLM max_tokens = args.pop("max_tokens") temperature = args.pop("temperature") top_p = args.pop("top_p") top_k = args.pop("top_k") # Create an LLM llm = LLM(**args) # Create a sampling params object sampling_params = llm.get_default_sampling_params() if max_tokens is not None: sampling_params.max_tokens = max_tokens if temperature is not None: sampling_params.temperature = temperature if top_p is not None: sampling_params.top_p = top_p if top_k is not None: sampling_params.top_k = top_k # Generate texts from the prompts. The output is a list of RequestOutput # objects that contain the prompt, generated text, and other information. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] outputs = llm.generate(prompts, sampling_params) # Print the outputs. print("-" * 50) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 50) if __name__ == "__main__": parser = create_parser() args: dict = vars(parser.parse_args()) main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/basic/basic.py
examples/offline_inference/basic/basic.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, SamplingParams # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create a sampling params object. sampling_params = SamplingParams(temperature=0.8, top_p=0.95) def main(): # Create an LLM. llm = LLM(model="facebook/opt-125m") # Generate texts from the prompts. # The output is a list of RequestOutput objects # that contain the prompt, generated text, and other information. outputs = llm.generate(prompts, sampling_params) # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}") print(f"Output: {generated_text!r}") print("-" * 60) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/basic/reward.py
examples/offline_inference/basic/reward.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from argparse import Namespace from vllm import LLM, EngineArgs from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() parser = EngineArgs.add_cli_args(parser) # Set example specific arguments parser.set_defaults( model="internlm/internlm2-1_8b-reward", runner="pooling", enforce_eager=True, max_model_len=1024, trust_remote_code=True, ) return parser.parse_args() def main(args: Namespace): # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create an LLM. # You should pass runner="pooling" for reward models llm = LLM(**vars(args)) # Generate rewards. The output is a list of PoolingRequestOutput. outputs = llm.reward(prompts) # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for prompt, output in zip(prompts, outputs): rewards = output.outputs.data rewards_trimmed = ( (str(rewards[:16])[:-1] + ", ...]") if len(rewards) > 16 else rewards ) print(f"Prompt: {prompt!r} \nReward: {rewards_trimmed} (size={len(rewards)})") print("-" * 60) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/basic/score.py
examples/offline_inference/basic/score.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from argparse import Namespace from vllm import LLM, EngineArgs from vllm.attention.backends.registry import AttentionBackendEnum from vllm.config import AttentionConfig from vllm.platforms import current_platform from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() parser = EngineArgs.add_cli_args(parser) # Set example specific arguments parser.set_defaults( model="BAAI/bge-reranker-v2-m3", runner="pooling", enforce_eager=True, ) return parser.parse_args() def main(args: Namespace): if current_platform.is_rocm(): args.attention_config = AttentionConfig( backend=AttentionBackendEnum.FLEX_ATTENTION ) # Sample prompts. text_1 = "What is the capital of France?" texts_2 = [ "The capital of Brazil is Brasilia.", "The capital of France is Paris.", ] # Create an LLM. # You should pass runner="pooling" for cross-encoder models llm = LLM(**vars(args)) # Generate scores. The output is a list of ScoringRequestOutputs. outputs = llm.score(text_1, texts_2) # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for text_2, output in zip(texts_2, outputs): score = output.outputs.score print(f"Pair: {[text_1, text_2]!r} \nScore: {score}") print("-" * 60) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/disaggregated-prefill-v1/prefill_example.py
examples/offline_inference/disaggregated-prefill-v1/prefill_example.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, SamplingParams from vllm.config import KVTransferConfig def read_prompts(): context = "Hi " * 1000 context2 = "Hey " * 500 return [ context + "Hello, my name is", context + "The capital of France is", context2 + "Your name is", context2 + "The capital of China is", ] def main(): prompts = read_prompts() sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=1) llm = LLM( model="meta-llama/Llama-3.2-1B-Instruct", enforce_eager=True, gpu_memory_utilization=0.8, kv_transfer_config=KVTransferConfig( kv_connector="ExampleConnector", kv_role="kv_both", kv_connector_extra_config={"shared_storage_path": "local_storage"}, ), ) # , max_model_len=2048, max_num_batched_tokens=2048) # 1ST generation (prefill instance) outputs = llm.generate( prompts, sampling_params, ) new_prompts = [] print("-" * 30) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text new_prompts.append(prompt + generated_text) print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 30) # Write new_prompts to output.txt with open("output.txt", "w") as f: for prompt in new_prompts: f.write(prompt + "\n") print(f"Saved {len(new_prompts)} prompts to output.txt") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/disaggregated-prefill-v1/decode_example.py
examples/offline_inference/disaggregated-prefill-v1/decode_example.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, SamplingParams from vllm.config import KVTransferConfig def read_prompts(): """Read prompts from output.txt""" prompts = [] try: with open("output.txt") as f: for line in f: prompts.append(line.strip()) print(f"Loaded {len(prompts)} prompts from output.txt") return prompts except FileNotFoundError: print("Error: output.txt file not found") exit(-1) def main(): prompts = read_prompts() sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10) llm = LLM( model="meta-llama/Llama-3.2-1B-Instruct", enforce_eager=True, gpu_memory_utilization=0.8, max_num_batched_tokens=64, max_num_seqs=16, kv_transfer_config=KVTransferConfig( kv_connector="ExampleConnector", kv_role="kv_both", kv_connector_extra_config={"shared_storage_path": "local_storage"}, ), ) # , max_model_len=2048, max_num_batched_tokens=2048) # 1ST generation (prefill instance) outputs = llm.generate(prompts, sampling_params) print("-" * 30) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 30) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/qwen2_5_omni/only_thinker.py
examples/offline_inference/qwen2_5_omni/only_thinker.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This example shows how to use vLLM for running offline inference
with the correct prompt format on Qwen2.5-Omni (thinker only).
"""

from typing import NamedTuple

from vllm import LLM, SamplingParams
from vllm.assets.audio import AudioAsset
from vllm.assets.image import ImageAsset
from vllm.assets.video import VideoAsset
from vllm.multimodal.image import convert_image_mode
from vllm.utils.argparse_utils import FlexibleArgumentParser


class QueryResult(NamedTuple):
    # inputs: the dict passed to llm.generate (prompt + multi_modal_data,
    # optionally mm_processor_kwargs).
    inputs: dict
    # limit_mm_per_prompt: per-modality item limits for the engine.
    limit_mm_per_prompt: dict[str, int]


# NOTE: The default `max_num_seqs` and `max_model_len` may result in OOM on
# lower-end GPUs.
# Unless specified, these settings have been tested to work on a single L4.


default_system = (
    "You are Qwen, a virtual human developed by the Qwen Team, Alibaba "
    "Group, capable of perceiving auditory and visual inputs, as well as "
    "generating text and speech."
)


def get_mixed_modalities_query() -> QueryResult:
    """Build a query combining one audio clip, one image, and one video."""
    question = (
        "What is recited in the audio? "
        "What is the content of this image? Why is this video funny?"
    )
    # Qwen2.5-Omni chat format: each modality placeholder must appear between
    # its matching bos/eos special tokens, in the order of the supplied data.
    prompt = (
        f"<|im_start|>system\n{default_system}<|im_end|>\n"
        "<|im_start|>user\n<|audio_bos|><|AUDIO|><|audio_eos|>"
        "<|vision_bos|><|IMAGE|><|vision_eos|>"
        "<|vision_bos|><|VIDEO|><|vision_eos|>"
        f"{question}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )
    return QueryResult(
        inputs={
            "prompt": prompt,
            "multi_modal_data": {
                "audio": AudioAsset("mary_had_lamb").audio_and_sample_rate,
                "image": convert_image_mode(
                    ImageAsset("cherry_blossom").pil_image, "RGB"
                ),
                "video": VideoAsset(name="baby_reading", num_frames=16).np_ndarrays,
            },
        },
        limit_mm_per_prompt={"audio": 1, "image": 1, "video": 1},
    )


def get_use_audio_in_video_query() -> QueryResult:
    """Build a query whose audio track is taken from the video itself."""
    question = (
        "Describe the content of the video, then convert what the baby say into text."
    )
    prompt = (
        f"<|im_start|>system\n{default_system}<|im_end|>\n"
        "<|im_start|>user\n<|vision_bos|><|VIDEO|><|vision_eos|>"
        f"{question}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )
    asset = VideoAsset(name="baby_reading", num_frames=16)
    audio = asset.get_audio(sampling_rate=16000)
    return QueryResult(
        inputs={
            "prompt": prompt,
            "multi_modal_data": {
                "video": asset.np_ndarrays,
                "audio": audio,
            },
            # Tells the processor to pair the audio with the video frames
            # (note there is no separate <|AUDIO|> placeholder in the prompt).
            "mm_processor_kwargs": {
                "use_audio_in_video": True,
            },
        },
        limit_mm_per_prompt={"audio": 1, "video": 1},
    )


def get_multi_audios_query() -> QueryResult:
    """Build a query comparing two audio clips in one prompt."""
    question = "Are these two audio clips the same?"
    prompt = (
        f"<|im_start|>system\n{default_system}<|im_end|>\n"
        "<|im_start|>user\n<|audio_bos|><|AUDIO|><|audio_eos|>"
        "<|audio_bos|><|AUDIO|><|audio_eos|>"
        f"{question}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )
    return QueryResult(
        inputs={
            "prompt": prompt,
            "multi_modal_data": {
                "audio": [
                    AudioAsset("winning_call").audio_and_sample_rate,
                    AudioAsset("mary_had_lamb").audio_and_sample_rate,
                ],
            },
        },
        limit_mm_per_prompt={
            "audio": 2,
        },
    )


# Maps --query-type CLI choices to their query builders.
query_map = {
    "mixed_modalities": get_mixed_modalities_query,
    "use_audio_in_video": get_use_audio_in_video_query,
    "multi_audios": get_multi_audios_query,
}


def main(args):
    """Run the selected query through the Qwen2.5-Omni thinker and print text."""
    model_name = "Qwen/Qwen2.5-Omni-7B"
    query_result = query_map[args.query_type]()
    llm = LLM(
        model=model_name,
        max_model_len=5632,
        max_num_seqs=5,
        limit_mm_per_prompt=query_result.limit_mm_per_prompt,
        seed=args.seed,
    )
    # We set temperature to 0.2 so that outputs can be different
    # even when all prompts are identical when running batch inference.
    sampling_params = SamplingParams(temperature=0.2, max_tokens=64)

    outputs = llm.generate(query_result.inputs, sampling_params=sampling_params)

    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)


def parse_args():
    """Parse the query-type selector and engine seed from the command line."""
    parser = FlexibleArgumentParser(
        description="Demo on using vLLM for offline inference with "
        "audio language models"
    )
    parser.add_argument(
        "--query-type",
        "-q",
        type=str,
        default="mixed_modalities",
        choices=query_map.keys(),
        help="Query type.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=0,
        help="Set the seed when initializing `vllm.LLM`.",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/kv_load_failure_recovery/prefill_example.py
examples/offline_inference/kv_load_failure_recovery/prefill_example.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from vllm import LLM, SamplingParams from vllm.config import KVTransferConfig def read_prompts(): context = "Hi " * 1000 context2 = "Hey " * 500 return [ context + "Hello, my name is", context + "The capital of France is", context2 + "Your name is", context2 + "The capital of China is", ] def main(): prompts = read_prompts() sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=1) llm = LLM( model="meta-llama/Llama-3.2-1B-Instruct", enforce_eager=True, gpu_memory_utilization=0.8, kv_transfer_config=KVTransferConfig( kv_connector="ExampleConnector", kv_role="kv_both", kv_connector_extra_config={"shared_storage_path": "local_storage"}, ), ) # , max_model_len=2048, max_num_batched_tokens=2048) # 1ST generation (prefill instance) outputs = llm.generate( prompts, sampling_params, ) new_prompts = [] print("-" * 30) for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text new_prompts.append(prompt + generated_text) print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") print("-" * 30) # Write new_prompts to prefill_output.txt with open("prefill_output.txt", "w") as f: for prompt in new_prompts: f.write(prompt + "\n") print(f"Saved {len(new_prompts)} prompts to prefill_output.txt") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/kv_load_failure_recovery/decode_example.py
examples/offline_inference/kv_load_failure_recovery/decode_example.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Decode step of the KV-load-failure-recovery example.

Continues generation from prefill_output.txt; with --simulate-failure it uses
the LoadRecoveryExampleConnector to inject (sync or async) KV load errors and
writes the recovered outputs to a mode-specific file for comparison.
"""

import argparse

from vllm import LLM, SamplingParams
from vllm.config import KVTransferConfig


def read_prompts():
    """Read prompts from prefill_output.txt"""
    try:
        with open("prefill_output.txt") as f:
            prompts = [line.strip() for line in f]
    except FileNotFoundError:
        print("Error: prefill_output.txt file not found")
        exit(-1)
    print(f"Loaded {len(prompts)} prompts from prefill_output.txt")
    return prompts


def main():
    prompts = read_prompts()
    sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--simulate-failure", action="store_true", help="Simulate KV load failure."
    )
    parser.add_argument(
        "--async-load", action="store_true", help="Simulate async KV load"
    )
    args = parser.parse_args()

    if args.simulate_failure:
        # Failure-injecting connector, loaded from a local module.
        transfer_cfg = KVTransferConfig(
            kv_connector="LoadRecoveryExampleConnector",
            kv_role="kv_both",
            kv_connector_extra_config={
                "shared_storage_path": "local_storage",
                "async_load": args.async_load,
            },
            kv_connector_module_path="load_recovery_example_connector",
        )
        result_path = (
            "async_decode_recovered_output.txt"
            if args.async_load
            else "sync_decode_recovered_output.txt"
        )
    else:
        # Baseline: plain shared-storage connector, no injected failures.
        transfer_cfg = KVTransferConfig(
            kv_connector="ExampleConnector",
            kv_role="kv_both",
            kv_connector_extra_config={
                "shared_storage_path": "local_storage",
            },
        )
        result_path = "decode_output.txt"

    llm = LLM(
        model="meta-llama/Llama-3.2-1B-Instruct",
        enforce_eager=True,
        gpu_memory_utilization=0.8,
        max_num_batched_tokens=64,
        max_num_seqs=16,
        kv_transfer_config=transfer_cfg,
    )

    outputs = llm.generate(prompts, sampling_params)

    divider = "-" * 30
    # Mirror each result to stdout and the mode-specific output file.
    with open(result_path, "w", encoding="utf-8") as f:
        for output in outputs:
            record = (
                f"Prompt: {output.prompt!r}\n"
                f"Generated text: {output.outputs[0].text!r}"
            )
            print(record)
            print(divider)
            f.write(record)
            f.write(divider)


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/offline_inference/kv_load_failure_recovery/load_recovery_example_connector.py
examples/offline_inference/kv_load_failure_recovery/load_recovery_example_connector.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
import logging
from dataclasses import dataclass, field
from typing import TYPE_CHECKING

from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
    KVConnectorMetadata,
    KVConnectorRole,
)
from vllm.distributed.kv_transfer.kv_connector.v1.example_connector import (
    ExampleConnector,
    ExampleConnectorMetadata,
)
from vllm.forward_context import ForwardContext
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.request import Request

if TYPE_CHECKING:
    from vllm.v1.core.sched.output import SchedulerOutput

logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)


@dataclass
class LoadRecoveryExampleConnectorMetadata(ExampleConnectorMetadata):
    # Request id -> block ids scheduled for (async) load in this step.
    # NOTE(review): annotated as set[int] but assigned list[int] values in
    # build_connector_meta — confirm intended element container type.
    req_to_block_ids: dict[str, set[int]] = field(default_factory=dict)

    @classmethod
    def from_base(cls, base: ExampleConnectorMetadata):
        # Promote base metadata; req_to_block_ids starts empty.
        return cls(requests=base.requests)


class LoadRecoveryExampleConnector(ExampleConnector):
    """ExampleConnector variant that simulates a KV-load failure.

    The first load request seen in each bound metadata batch is dropped and
    all of its blocks are reported as failed via
    get_block_ids_with_load_errors(), so the scheduler's recovery path can be
    exercised in both sync and async load modes.
    """

    def __init__(self, vllm_config: "VllmConfig", role: KVConnectorRole):
        super().__init__(vllm_config=vllm_config, role=role)
        # Whether to simulate asynchronous (deferred) KV loading.
        self._async_load = vllm_config.kv_transfer_config.get_from_extra_config(
            "async_load", False
        )
        # Block ids of the load we chose to fail; valid only while metadata
        # is bound (set in bind_connector_metadata, reset in clear_...).
        self._invalid_block_ids: set | None = None
        # Request ids already offered external tokens once (each request is
        # only considered for external KV matching a single time).
        self._seen_requests: set[str] = set()
        # Request id -> block ids allocated for its external-token load.
        self._req_to_block_ids: dict[str, list[int]] = dict()

    def bind_connector_metadata(self, connector_metadata: KVConnectorMetadata) -> None:
        assert isinstance(connector_metadata, LoadRecoveryExampleConnectorMetadata)
        # Pick the first *load* (non-store) request in this batch, if any.
        index, failed_request = next(
            (
                (i, x)
                for i, x in enumerate(connector_metadata.requests)
                if not x.is_store
            ),
            (None, None),
        )
        if index is not None:
            # Drop it so the load never happens, and report all of its
            # blocks (one per block_size stride of the slot mapping) as bad.
            del connector_metadata.requests[index]
            self._invalid_block_ids = set(
                (
                    failed_request.slot_mapping[:: self._block_size] // self._block_size
                ).tolist()
            )
            logger.info(
                "Simulating failure to load all KV blocks for the "
                "first load request. Total blocks: %d",
                len(self._invalid_block_ids),
            )
        super().bind_connector_metadata(connector_metadata)

    def clear_connector_metadata(self) -> None:
        # Failure report is only meaningful for the step it was bound in.
        self._invalid_block_ids = None
        super().clear_connector_metadata()

    def start_load_kv(self, forward_context: ForwardContext, **kwargs) -> None:
        if self._async_load and forward_context.attn_metadata is None:
            # Bypass sanity check in super().start_load_kv
            forward_context.attn_metadata = "None"

        super().start_load_kv(forward_context, **kwargs)

    def get_finished(
        self, finished_req_ids: set[str]
    ) -> tuple[set[str] | None, set[str] | None]:
        # In async mode, report the requests whose loads were scheduled this
        # step as "finished receiving"; nothing is ever reported as sent.
        if self._async_load:
            meta = self._get_connector_metadata()
            assert isinstance(meta, LoadRecoveryExampleConnectorMetadata)
            if meta.req_to_block_ids:
                return None, set(meta.req_to_block_ids)

        return None, None

    def get_block_ids_with_load_errors(self) -> set[int]:
        # Blocks of the dropped load request (None outside a bound step).
        return self._invalid_block_ids

    def get_num_new_matched_tokens(
        self,
        request: Request,
        num_computed_tokens: int,
    ) -> tuple[int, bool]:
        # Only offer external tokens once per request; on retry after the
        # simulated failure the request must be recomputed locally.
        if request.request_id in self._seen_requests:
            return 0, False

        self._seen_requests.add(request.request_id)

        num_tokens, _ = super().get_num_new_matched_tokens(request, num_computed_tokens)
        # Second element: load is asynchronous (only in async mode with >0
        # matched tokens).
        return num_tokens, self._async_load and num_tokens > 0

    def update_state_after_alloc(
        self, request: Request, blocks: KVCacheBlocks, num_external_tokens: int
    ):
        """
        Update KVConnector state after block allocation.

        If blocks were allocated, add to _requests_need_load,
        such that we load the KVs in the next forward pass.
        """
        super().update_state_after_alloc(request, blocks, num_external_tokens)
        if num_external_tokens > 0:
            # Remember which blocks back this request's external-token load.
            self._req_to_block_ids[request.request_id] = blocks.get_block_ids()[0]

    def build_connector_meta(
        self,
        scheduler_output: "SchedulerOutput",
    ) -> KVConnectorMetadata:
        if not self._async_load:
            # Sync path: let the base connector build the request list.
            # NOTE(review): assumes super() consumes _requests_need_load here
            # — confirm against ExampleConnector.build_connector_meta.
            base = super().build_connector_meta(scheduler_output)
            meta = LoadRecoveryExampleConnectorMetadata.from_base(base)
        else:
            # Async path: build load requests ourselves from pending state.
            meta = LoadRecoveryExampleConnectorMetadata()
            if self._requests_need_load:
                for req_id, request in self._requests_need_load.items():
                    meta.add_request(
                        token_ids=request.prompt_token_ids,
                        block_ids=self._req_to_block_ids[req_id],
                        block_size=self._block_size,
                        is_store=False,
                        mm_hashes=[],
                    )
                # Clear state
                self._requests_need_load.clear()
        # Hand this step's request->blocks mapping to the metadata (consumed
        # by get_finished) and reset the accumulator for the next step.
        meta.req_to_block_ids = self._req_to_block_ids
        self._req_to_block_ids = dict()
        return meta
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/plugin/prithvi_geospatial_mae_client.py
examples/pooling/plugin/prithvi_geospatial_mae_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import base64 import os import requests # This example shows how to perform an online inference that generates # multimodal data. In this specific case this example will take a geotiff # image as input, process it using the multimodal data processor, and # perform inference. # Requirements : # - install TerraTorch v1.1 (or later): # pip install terratorch>=v1.1 # - start vllm in serving mode with the below args # --model='christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM' # --model-impl terratorch # --trust-remote-code # --skip-tokenizer-init --enforce-eager # --io-processor-plugin terratorch_segmentation # --enable-mm-embeds def main(): image_url = "https://huggingface.co/christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM/resolve/main/valencia_example_2024-10-26.tiff" # noqa: E501 server_endpoint = "http://localhost:8000/pooling" request_payload_url = { "data": { "data": image_url, "data_format": "url", "image_format": "tiff", "out_data_format": "b64_json", }, "priority": 0, "model": "christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM", } ret = requests.post(server_endpoint, json=request_payload_url) print(f"response.status_code: {ret.status_code}") print(f"response.reason:{ret.reason}") response = ret.json() decoded_image = base64.b64decode(response["data"]["data"]) out_path = os.path.join(os.getcwd(), "online_prediction.tiff") with open(out_path, "wb") as f: f.write(decoded_image) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/plugin/prithvi_geospatial_mae_io_processor.py
examples/pooling/plugin/prithvi_geospatial_mae_io_processor.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import base64 import os import torch from vllm import LLM # This example shows how to perform an offline inference that generates # multimodal data. In this specific case this example will take a geotiff # image as input, process it using the multimodal data processor, and # perform inference. # Requirements: # - install TerraTorch v1.1 (or later): # pip install terratorch>=v1.1 def main(): torch.set_default_dtype(torch.float16) image_url = "https://huggingface.co/christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM/resolve/main/valencia_example_2024-10-26.tiff" # noqa: E501 img_prompt = dict( data=image_url, data_format="url", image_format="tiff", out_data_format="b64_json", ) llm = LLM( model="christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM", skip_tokenizer_init=True, trust_remote_code=True, enforce_eager=True, # Limit the maximum number of parallel requests # to avoid the model going OOM. # The maximum number depends on the available GPU memory max_num_seqs=32, io_processor_plugin="terratorch_segmentation", model_impl="terratorch", enable_mm_embeds=True, ) pooler_output = llm.encode(img_prompt, pooling_task="plugin") output = pooler_output[0].outputs print(output) decoded_data = base64.b64decode(output.data) file_path = os.path.join(os.getcwd(), "offline_prediction.tiff") with open(file_path, "wb") as f: f.write(decoded_data) print(f"Output file path: {file_path}") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/plugin/prithvi_geospatial_mae_offline.py
examples/pooling/plugin/prithvi_geospatial_mae_offline.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import datetime import os import albumentations import numpy as np import rasterio import regex as re import torch from einops import rearrange from terratorch.datamodules import Sen1Floods11NonGeoDataModule from vllm import LLM torch.set_default_dtype(torch.float16) NO_DATA = -9999 NO_DATA_FLOAT = 0.0001 OFFSET = 0 PERCENTILE = 99 datamodule_config = { "bands": ["BLUE", "GREEN", "RED", "NIR_NARROW", "SWIR_1", "SWIR_2"], "batch_size": 16, "constant_scale": 0.0001, "data_root": "/dccstor/geofm-finetuning/datasets/sen1floods11", "drop_last": True, "no_data_replace": 0.0, "no_label_replace": -1, "num_workers": 8, "test_transform": [ albumentations.Resize( always_apply=False, height=448, interpolation=1, p=1, width=448 ), albumentations.pytorch.ToTensorV2( transpose_mask=False, always_apply=True, p=1.0 ), ], } class PrithviMAE: def __init__(self, model): self.model = LLM( model=model, skip_tokenizer_init=True, dtype="float16", enforce_eager=True, model_impl="terratorch", enable_mm_embeds=True, ) def run(self, input_data, location_coords): # merge the inputs into one data structure if input_data is not None and input_data.dtype == torch.float32: input_data = input_data.to(torch.float16) input_data = input_data[0] mm_data = { "pixel_values": input_data, "location_coords": location_coords, } prompt = {"prompt_token_ids": [1], "multi_modal_data": mm_data} outputs = self.model.encode(prompt, pooling_task="plugin", use_tqdm=False) return outputs[0].outputs.data def generate_datamodule(): datamodule = Sen1Floods11NonGeoDataModule( data_root=datamodule_config["data_root"], batch_size=datamodule_config["batch_size"], num_workers=datamodule_config["num_workers"], bands=datamodule_config["bands"], drop_last=datamodule_config["drop_last"], test_transform=datamodule_config["test_transform"], ) return datamodule def process_channel_group(orig_img, channels): """ 
Args: orig_img: torch.Tensor representing original image (reference) with shape = (bands, H, W). channels: list of indices representing RGB channels. Returns: torch.Tensor with shape (num_channels, height, width) for original image """ orig_img = orig_img[channels, ...] valid_mask = torch.ones_like(orig_img, dtype=torch.bool) valid_mask[orig_img == NO_DATA_FLOAT] = False # Rescale (enhancing contrast) max_value = max(3000, np.percentile(orig_img[valid_mask], PERCENTILE)) min_value = OFFSET orig_img = torch.clamp((orig_img - min_value) / (max_value - min_value), 0, 1) # No data as zeros orig_img[~valid_mask] = 0 return orig_img def read_geotiff(file_path: str): """Read all bands from *file_path* and return image + meta info. Args: file_path: path to image file. Returns: np.ndarray with shape (bands, height, width) meta info dict """ with rasterio.open(file_path) as src: img = src.read() meta = src.meta try: coords = src.lnglat() except Exception: # Cannot read coords coords = None return img, meta, coords def save_geotiff(image, output_path: str, meta: dict): """Save multi-band image in Geotiff file. Args: image: np.ndarray with shape (bands, height, width) output_path: path where to save the image meta: dict with meta info. """ with rasterio.open(output_path, "w", **meta) as dest: for i in range(image.shape[0]): dest.write(image[i, :, :], i + 1) return def _convert_np_uint8(float_image: torch.Tensor): image = float_image.numpy() * 255.0 image = image.astype(dtype=np.uint8) return image def load_example( file_paths: list[str], mean: list[float] = None, std: list[float] = None, indices: list[int] | None = None, ): """Build an input example by loading images in *file_paths*. Args: file_paths: list of file paths . mean: list containing mean values for each band in the images in *file_paths*. std: list containing std values for each band in the images in *file_paths*. 
Returns: np.array containing created example list of meta info for each image in *file_paths* """ imgs = [] metas = [] temporal_coords = [] location_coords = [] for file in file_paths: img, meta, coords = read_geotiff(file) # Rescaling (don't normalize on nodata) img = np.moveaxis(img, 0, -1) # channels last for rescaling if indices is not None: img = img[..., indices] if mean is not None and std is not None: img = np.where(img == NO_DATA, NO_DATA_FLOAT, (img - mean) / std) imgs.append(img) metas.append(meta) if coords is not None: location_coords.append(coords) try: match = re.search(r"(\d{7,8}T\d{6})", file) if match: year = int(match.group(1)[:4]) julian_day = match.group(1).split("T")[0][4:] if len(julian_day) == 3: julian_day = int(julian_day) else: julian_day = ( datetime.datetime.strptime(julian_day, "%m%d") .timetuple() .tm_yday ) temporal_coords.append([year, julian_day]) except Exception as e: print(f"Could not extract timestamp for {file} ({e})") imgs = np.stack(imgs, axis=0) # num_frames, H, W, C imgs = np.moveaxis(imgs, -1, 0).astype("float32") # C, num_frames, H, W imgs = np.expand_dims(imgs, axis=0) # add batch di return imgs, temporal_coords, location_coords, metas def run_model( input_data, temporal_coords, location_coords, model, datamodule, img_size, lightning_model=None, ): # Reflect pad if not divisible by img_size original_h, original_w = input_data.shape[-2:] pad_h = (img_size - (original_h % img_size)) % img_size pad_w = (img_size - (original_w % img_size)) % img_size input_data = np.pad( input_data, ((0, 0), (0, 0), (0, 0), (0, pad_h), (0, pad_w)), mode="reflect" ) # Build sliding window batch_size = 1 # batch = torch.tensor(input_data, device="cpu") batch = torch.tensor(input_data) windows = batch.unfold(3, img_size, img_size).unfold(4, img_size, img_size) h1, w1 = windows.shape[3:5] windows = rearrange( windows, "b c t h1 w1 h w -> (b h1 w1) c t h w", h=img_size, w=img_size ) # Split into batches if number of windows > batch_size 
num_batches = windows.shape[0] // batch_size if windows.shape[0] > batch_size else 1 windows = torch.tensor_split(windows, num_batches, dim=0) if temporal_coords: temporal_coords = torch.tensor(temporal_coords).unsqueeze(0) else: temporal_coords = None if location_coords: location_coords = torch.tensor(location_coords[0]).unsqueeze(0) else: location_coords = None # Run Prithvi-EO-V2-300M-TL-Sen1Floods11 pred_imgs = [] for x in windows: # Apply standardization x = datamodule.test_transform(image=x.squeeze().numpy().transpose(1, 2, 0)) x = datamodule.aug(x)["image"] with torch.no_grad(): pred = model.run(x, location_coords=location_coords) y_hat = pred.argmax(dim=1) y_hat = torch.nn.functional.interpolate( y_hat.unsqueeze(1).float(), size=img_size, mode="nearest" ) pred_imgs.append(y_hat) pred_imgs = torch.concat(pred_imgs, dim=0) # Build images from patches pred_imgs = rearrange( pred_imgs, "(b h1 w1) c h w -> b c (h1 h) (w1 w)", h=img_size, w=img_size, b=1, c=1, h1=h1, w1=w1, ) # Cut padded area back to original size pred_imgs = pred_imgs[..., :original_h, :original_w] # Squeeze (batch size 1) pred_imgs = pred_imgs[0] return pred_imgs def main( data_file: str, model: str, output_dir: str, rgb_outputs: bool, input_indices: list[int] = None, ): os.makedirs(output_dir, exist_ok=True) model_obj = PrithviMAE(model=model) datamodule = generate_datamodule() img_size = 512 # Size of Sen1Floods11 input_data, temporal_coords, location_coords, meta_data = load_example( file_paths=[data_file], indices=input_indices, ) meta_data = meta_data[0] # only one image if input_data.mean() > 1: input_data = input_data / 10000 # Convert to range 0-1 channels = [ datamodule_config["bands"].index(b) for b in ["RED", "GREEN", "BLUE"] ] # BGR -> RGB pred = run_model( input_data, temporal_coords, location_coords, model_obj, datamodule, img_size ) # Save pred meta_data.update(count=1, dtype="uint8", compress="lzw", nodata=0) pred_file = os.path.join( output_dir, 
f"pred_{os.path.splitext(os.path.basename(data_file))[0]}.tiff" ) save_geotiff(_convert_np_uint8(pred), pred_file, meta_data) # Save image + pred meta_data.update(count=3, dtype="uint8", compress="lzw", nodata=0) if input_data.mean() < 1: input_data = input_data * 10000 # Scale to 0-10000 rgb_orig = process_channel_group( orig_img=torch.Tensor(input_data[0, :, 0, ...]), channels=channels, ) rgb_orig = rgb_orig.to(torch.float32) pred[pred == 0.0] = np.nan img_pred = rgb_orig * 0.7 + pred * 0.3 img_pred[img_pred.isnan()] = rgb_orig[img_pred.isnan()] img_pred_file = os.path.join( output_dir, f"rgb_pred_{os.path.splitext(os.path.basename(data_file))[0]}.tiff" ) save_geotiff( image=_convert_np_uint8(img_pred), output_path=img_pred_file, meta=meta_data, ) # Save image rgb if rgb_outputs: name_suffix = os.path.splitext(os.path.basename(data_file))[0] rgb_file = os.path.join( output_dir, f"original_rgb_{name_suffix}.tiff", ) save_geotiff( image=_convert_np_uint8(rgb_orig), output_path=rgb_file, meta=meta_data, ) if __name__ == "__main__": parser = argparse.ArgumentParser("MAE run inference", add_help=False) parser.add_argument( "--data_file", type=str, default="./India_900498_S2Hand.tif", help="Path to the file.", ) parser.add_argument( "--model", type=str, default="christian-pinto/Prithvi-EO-2.0-300M-TL-VLLM", help="Path to a checkpoint file to load from.", ) parser.add_argument( "--output_dir", type=str, default="output", help="Path to the directory where to save outputs.", ) parser.add_argument( "--input_indices", default=[1, 2, 3, 8, 11, 12], type=int, nargs="+", help=""" 0-based indices of the six Prithvi channels to be selected from the input. By default selects [1,2,3,8,11,12] for S2L1C data. """, ) parser.add_argument( "--rgb_outputs", action="store_true", help="If present, output files will only contain RGB channels. " "Otherwise, all bands will be saved.", ) args = parser.parse_args() main(**vars(args))
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/token_embed/multi_vector_retrieval_client.py
examples/pooling/token_embed/multi_vector_retrieval_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Example online usage of Pooling API for multi vector retrieval. Run `vllm serve <model> --runner pooling` to start up the server in vLLM. e.g. vllm serve BAAI/bge-m3 """ import argparse import requests import torch def post_http_request(prompt: dict, api_url: str) -> requests.Response: headers = {"User-Agent": "Test Client"} response = requests.post(api_url, headers=headers, json=prompt) return response def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--port", type=int, default=8000) parser.add_argument("--model", type=str, default="BAAI/bge-m3") return parser.parse_args() def main(args): api_url = f"http://{args.host}:{args.port}/pooling" model_name = args.model prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] prompt = {"model": model_name, "input": prompts} pooling_response = post_http_request(prompt=prompt, api_url=api_url) for output in pooling_response.json()["data"]: multi_vector = torch.tensor(output["data"]) print(multi_vector.shape) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/token_embed/multi_vector_retrieval.py
examples/pooling/token_embed/multi_vector_retrieval.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from argparse import Namespace from vllm import LLM, EngineArgs from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() parser = EngineArgs.add_cli_args(parser) # Set example specific arguments parser.set_defaults( model="BAAI/bge-m3", runner="pooling", enforce_eager=True, ) return parser.parse_args() def main(args: Namespace): # Sample prompts. prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] # Create an LLM. # You should pass runner="pooling" for embedding models llm = LLM(**vars(args)) # Generate embedding. The output is a list of EmbeddingRequestOutputs. outputs = llm.embed(prompts) # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for prompt, output in zip(prompts, outputs): embeds = output.outputs.embedding print(len(embeds)) # Generate embedding for each token. The output is a list of PoolingRequestOutput. outputs = llm.encode(prompts, pooling_task="token_embed") # Print the outputs. print("\nGenerated Outputs:\n" + "-" * 60) for prompt, output in zip(prompts, outputs): multi_vector = output.outputs.data print(multi_vector.shape) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/token_embed/jina_embeddings_v4.py
examples/pooling/token_embed/jina_embeddings_v4.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch from vllm import LLM from vllm.inputs.data import TextPrompt from vllm.multimodal.utils import fetch_image # Initialize model model = LLM( model="jinaai/jina-embeddings-v4-vllm-text-matching", runner="pooling", max_model_len=1024, gpu_memory_utilization=0.8, ) # Create text prompts text1 = "Ein wunderschöner Sonnenuntergang am Strand" text1_prompt = TextPrompt(prompt=f"Query: {text1}") text2 = "浜辺に沈む美しい夕日" text2_prompt = TextPrompt(prompt=f"Query: {text2}") # Create image prompt image = fetch_image( "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/eskimo.jpg" # noqa: E501 ) image_prompt = TextPrompt( prompt="<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe the image.<|im_end|>\n", # noqa: E501 multi_modal_data={"image": image}, ) # Encode all prompts prompts = [text1_prompt, text2_prompt, image_prompt] outputs = model.encode(prompts, pooling_task="token_embed") def get_embeddings(outputs): VISION_START_TOKEN_ID, VISION_END_TOKEN_ID = 151652, 151653 embeddings = [] for output in outputs: if VISION_START_TOKEN_ID in output.prompt_token_ids: # Gather only vision tokens img_start_pos = torch.where( torch.tensor(output.prompt_token_ids) == VISION_START_TOKEN_ID )[0][0] img_end_pos = torch.where( torch.tensor(output.prompt_token_ids) == VISION_END_TOKEN_ID )[0][0] embeddings_tensor = output.outputs.data.detach().clone()[ img_start_pos : img_end_pos + 1 ] else: # Use all tokens for text-only prompts embeddings_tensor = output.outputs.data.detach().clone() # Pool and normalize embeddings pooled_output = ( embeddings_tensor.sum(dim=0, dtype=torch.float32) / embeddings_tensor.shape[0] ) embeddings.append(torch.nn.functional.normalize(pooled_output, dim=-1)) return embeddings embeddings = get_embeddings(outputs) for embedding in embeddings: print(embedding.shape)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/convert_model_to_seq_cls.py
examples/pooling/score/convert_model_to_seq_cls.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 import argparse import json import torch import transformers # Usage: # for BAAI/bge-reranker-v2-gemma # Caution: "Yes" and "yes" are two different tokens # python convert_model_to_seq_cls.py --model_name BAAI/bge-reranker-v2-gemma --classifier_from_tokens '["Yes"]' --method no_post_processing --path ./bge-reranker-v2-gemma-seq-cls # for mxbai-rerank-v2 # python convert_model_to_seq_cls.py --model_name mixedbread-ai/mxbai-rerank-base-v2 --classifier_from_tokens '["0", "1"]' --method from_2_way_softmax --path ./mxbai-rerank-base-v2-seq-cls # for Qwen3-Reranker # python convert_model_to_seq_cls.py --model_name Qwen/Qwen3-Reranker-0.6B --classifier_from_tokens '["no", "yes"]' --method from_2_way_softmax --path ./Qwen3-Reranker-0.6B-seq-cls def from_2_way_softmax(causal_lm, seq_cls_model, tokenizer, tokens, device): # refer to https://huggingface.co/Qwen/Qwen3-Reranker-0.6B/discussions/3 assert len(tokens) == 2 lm_head_weights = causal_lm.lm_head.weight false_id = tokenizer.convert_tokens_to_ids(tokens[0]) true_id = tokenizer.convert_tokens_to_ids(tokens[1]) score_weight = lm_head_weights[true_id].to(device).to( torch.float32 ) - lm_head_weights[false_id].to(device).to(torch.float32) with torch.no_grad(): seq_cls_model.score.weight.copy_(score_weight.unsqueeze(0)) if seq_cls_model.score.bias is not None: seq_cls_model.score.bias.zero_() def no_post_processing(causal_lm, seq_cls_model, tokenizer, tokens, device): lm_head_weights = causal_lm.lm_head.weight token_ids = [tokenizer.convert_tokens_to_ids(t) for t in tokens] score_weight = lm_head_weights[token_ids].to(device) with torch.no_grad(): seq_cls_model.score.weight.copy_(score_weight) if seq_cls_model.score.bias is not None: seq_cls_model.score.bias.zero_() method_map = { function.__name__: function for function in [from_2_way_softmax, no_post_processing] } def converting( model_name, 
classifier_from_tokens, path, method, use_pad_token=False, device="cpu" ): assert method in method_map if method == "from_2_way_softmax": assert len(classifier_from_tokens) == 2 num_labels = 1 else: num_labels = len(classifier_from_tokens) tokenizer = transformers.AutoTokenizer.from_pretrained(model_name) causal_lm = transformers.AutoModelForCausalLM.from_pretrained( model_name, device_map=device ) seq_cls_model = transformers.AutoModelForSequenceClassification.from_pretrained( model_name, num_labels=num_labels, ignore_mismatched_sizes=True, device_map=device, ) method_map[method]( causal_lm, seq_cls_model, tokenizer, classifier_from_tokens, device ) # `llm as reranker` defaults to not using pad_token seq_cls_model.config.use_pad_token = use_pad_token seq_cls_model.config.pad_token_id = tokenizer.pad_token_id seq_cls_model.save_pretrained(path) tokenizer.save_pretrained(path) def parse_args(): parser = argparse.ArgumentParser( description="Converting *ForCausalLM models to " "*ForSequenceClassification models." ) parser.add_argument( "--model_name", type=str, default="BAAI/bge-reranker-v2-gemma", help="Model name", ) parser.add_argument( "--classifier_from_tokens", type=str, default='["Yes"]', help="classifier from tokens", ) parser.add_argument( "--method", type=str, default="no_post_processing", help="Converting converting" ) parser.add_argument( "--use-pad-token", action="store_true", help="Whether to use pad_token" ) parser.add_argument( "--path", type=str, default="./bge-reranker-v2-gemma-seq-cls", help="Path to save converted model", ) return parser.parse_args() if __name__ == "__main__": args = parse_args() converting( model_name=args.model_name, classifier_from_tokens=json.loads(args.classifier_from_tokens), method=args.method, use_pad_token=args.use_pad_token, path=args.path, )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/openai_reranker.py
examples/pooling/score/openai_reranker.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Example of using the OpenAI entrypoint's rerank API which is compatible with Jina and Cohere https://jina.ai/reranker run: vllm serve BAAI/bge-reranker-base """ import json import requests url = "http://127.0.0.1:8000/rerank" headers = {"accept": "application/json", "Content-Type": "application/json"} data = { "model": "BAAI/bge-reranker-base", "query": "What is the capital of France?", "documents": [ "The capital of Brazil is Brasilia.", "The capital of France is Paris.", "Horses and cows are both animals", ], } def main(): response = requests.post(url, headers=headers, json=data) # Check the response if response.status_code == 200: print("Request successful!") print(json.dumps(response.json(), indent=2)) else: print(f"Request failed with status code: {response.status_code}") print(response.text) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/openai_cross_encoder_score.py
examples/pooling/score/openai_cross_encoder_score.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Example online usage of Score API. Run `vllm serve <model> --runner pooling` to start up the server in vLLM. """ import argparse import pprint import requests def post_http_request(prompt: dict, api_url: str) -> requests.Response: headers = {"User-Agent": "Test Client"} response = requests.post(api_url, headers=headers, json=prompt) return response def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--port", type=int, default=8000) parser.add_argument("--model", type=str, default="BAAI/bge-reranker-v2-m3") return parser.parse_args() def main(args): api_url = f"http://{args.host}:{args.port}/score" model_name = args.model text_1 = "What is the capital of Brazil?" text_2 = "The capital of Brazil is Brasilia." prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} score_response = post_http_request(prompt=prompt, api_url=api_url) print("\nPrompt when text_1 and text_2 are both strings:") pprint.pprint(prompt) print("\nScore Response:") pprint.pprint(score_response.json()) text_1 = "What is the capital of France?" 
text_2 = ["The capital of Brazil is Brasilia.", "The capital of France is Paris."] prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} score_response = post_http_request(prompt=prompt, api_url=api_url) print("\nPrompt when text_1 is string and text_2 is a list:") pprint.pprint(prompt) print("\nScore Response:") pprint.pprint(score_response.json()) text_1 = ["What is the capital of Brazil?", "What is the capital of France?"] text_2 = ["The capital of Brazil is Brasilia.", "The capital of France is Paris."] prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} score_response = post_http_request(prompt=prompt, api_url=api_url) print("\nPrompt when text_1 and text_2 are both lists:") pprint.pprint(prompt) print("\nScore Response:") pprint.pprint(score_response.json()) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/offline_using_template.py
examples/pooling/score/offline_using_template.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 from pathlib import Path from vllm import LLM model_name = "nvidia/llama-nemotron-rerank-1b-v2" # Path to template file template_path = Path(__file__).parent / "template" / "nemotron-rerank.jinja" chat_template = template_path.read_text() llm = LLM(model=model_name, runner="pooling", trust_remote_code=True) query = "how much protein should a female eat?" documents = [ "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.", "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments.", "Calorie intake should not fall below 1,200 a day in women or 1,500 a day in men, except under the supervision of a health professional.", ] outputs = llm.score(query, documents, chat_template=chat_template) print("-" * 30) print([output.outputs.score for output in outputs]) print("-" * 30)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/cohere_rerank_client.py
examples/pooling/score/cohere_rerank_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Example of using the OpenAI entrypoint's rerank API which is compatible with the Cohere SDK: https://github.com/cohere-ai/cohere-python Note that `pip install cohere` is needed to run this example. run: vllm serve BAAI/bge-reranker-base """ import cohere from cohere import Client, ClientV2 model = "BAAI/bge-reranker-base" query = "What is the capital of France?" documents = [ "The capital of France is Paris", "Reranking is fun!", "vLLM is an open-source framework for fast AI serving", ] def cohere_rerank( client: Client | ClientV2, model: str, query: str, documents: list[str] ) -> dict: return client.rerank(model=model, query=query, documents=documents) def main(): # cohere v1 client cohere_v1 = cohere.Client(base_url="http://localhost:8000", api_key="sk-fake-key") rerank_v1_result = cohere_rerank(cohere_v1, model, query, documents) print("-" * 50) print("rerank_v1_result:\n", rerank_v1_result) print("-" * 50) # or the v2 cohere_v2 = cohere.ClientV2("sk-fake-key", base_url="http://localhost:8000") rerank_v2_result = cohere_rerank(cohere_v2, model, query, documents) print("rerank_v2_result:\n", rerank_v2_result) print("-" * 50) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/online_using_template.py
examples/pooling/score/online_using_template.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 """ Example of using the rerank API with template. run: vllm serve nvidia/llama-nemotron-rerank-1b-v2 --runner pooling --trust-remote-code --chat-template examples/pooling/score/template/nemotron-rerank.jinja """ import json import requests url = "http://127.0.0.1:8000/rerank" headers = {"accept": "application/json", "Content-Type": "application/json"} query = "how much protein should a female eat?" documents = [ "As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.", "Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. : 3 a meeting or series of meetings between the leaders of two or more governments.", "Calorie intake should not fall below 1,200 a day in women or 1,500 a day in men, except under the supervision of a health professional.", ] data = { "model": "nvidia/llama-nemotron-rerank-1b-v2", "query": query, "documents": documents, } def main(): response = requests.post(url, headers=headers, json=data) # Check the response if response.status_code == 200: print("Request successful!") print(json.dumps(response.json(), indent=2)) else: print(f"Request failed with status code: {response.status_code}") print(response.text) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/offline_reranker.py
examples/pooling/score/offline_reranker.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # ruff: noqa: E501 from vllm import LLM model_name = "Qwen/Qwen3-Reranker-0.6B" # What is the difference between the official original version and one # that has been converted into a sequence classification model? # Qwen3-Reranker is a language model that doing reranker by using the # logits of "no" and "yes" tokens. # It needs to computing 151669 tokens logits, making this method extremely # inefficient, not to mention incompatible with the vllm score API. # A method for converting the original model into a sequence classification # model was proposed. See:https://huggingface.co/Qwen/Qwen3-Reranker-0.6B/discussions/3 # Models converted offline using this method can not only be more efficient # and support the vllm score API, but also make the init parameters more # concise, for example. # llm = LLM(model="tomaarsen/Qwen3-Reranker-0.6B-seq-cls", runner="pooling") # If you want to load the official original version, the init parameters are # as follows. def get_llm() -> LLM: """Initializes and returns the LLM model for Qwen3-Reranker.""" return LLM( model=model_name, runner="pooling", hf_overrides={ "architectures": ["Qwen3ForSequenceClassification"], "classifier_from_token": ["no", "yes"], "is_original_qwen3_reranker": True, }, ) # Why do we need hf_overrides for the official original version: # vllm converts it to Qwen3ForSequenceClassification when loaded for # better performance. # - Firstly, we need using `"architectures": ["Qwen3ForSequenceClassification"],` # to manually route to Qwen3ForSequenceClassification. # - Then, we will extract the vector corresponding to classifier_from_token # from lm_head using `"classifier_from_token": ["no", "yes"]`. # - Third, we will convert these two vectors into one vector. The use of # conversion logic is controlled by `using "is_original_qwen3_reranker": True`. 
# Please use the query_template and document_template to format the query and # document for better reranker results. prefix = '<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be "yes" or "no".<|im_end|>\n<|im_start|>user\n' suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n" query_template = "{prefix}<Instruct>: {instruction}\n<Query>: {query}\n" document_template = "<Document>: {doc}{suffix}" def main() -> None: instruction = ( "Given a web search query, retrieve relevant passages that answer the query" ) queries = [ "What is the capital of China?", "Explain gravity", ] documents = [ "The capital of China is Beijing.", "Gravity is a force that attracts two bodies towards each other. It gives weight to physical objects and is responsible for the movement of planets around the sun.", ] queries = [ query_template.format(prefix=prefix, instruction=instruction, query=query) for query in queries ] documents = [document_template.format(doc=doc, suffix=suffix) for doc in documents] llm = get_llm() outputs = llm.score(queries, documents) print("-" * 30) print([output.outputs.score for output in outputs]) print("-" * 30) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/score/openai_cross_encoder_score_for_multimodal.py
examples/pooling/score/openai_cross_encoder_score_for_multimodal.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Example online usage of Score API.

Run `vllm serve <model> --runner pooling`
to start up the server in vLLM.
"""

import argparse
import pprint

import requests


def post_http_request(prompt: dict, api_url: str) -> requests.Response:
    """POST `prompt` as JSON to `api_url` and return the raw response."""
    return requests.post(api_url, headers={"User-Agent": "Test Client"}, json=prompt)


def parse_args():
    """Command-line options: server location and model name."""
    p = argparse.ArgumentParser()
    p.add_argument("--host", type=str, default="localhost")
    p.add_argument("--port", type=int, default=8000)
    p.add_argument("--model", type=str, default="jinaai/jina-reranker-m0")
    return p.parse_args()


def main(args):
    api_url = f"http://{args.host}:{args.port}/score"
    model_name = args.model

    text_1 = "slm markdown"

    # text_2 is a list of image documents to score against text_1.
    document_urls = [
        "https://raw.githubusercontent.com/jina-ai/multimodal-reranker-test/main/handelsblatt-preview.png",
        "https://raw.githubusercontent.com/jina-ai/multimodal-reranker-test/main/paper-11.png",
    ]
    text_2 = {
        "content": [
            {"type": "image_url", "image_url": {"url": doc_url}}
            for doc_url in document_urls
        ]
    }

    prompt = {"model": model_name, "text_1": text_1, "text_2": text_2}
    response = post_http_request(prompt=prompt, api_url=api_url)

    print("\nPrompt when text_1 is string and text_2 is a image list:")
    pprint.pprint(prompt)
    print("\nScore Response:")
    pprint.pprint(response.json())


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/pooling/openai_pooling_client.py
examples/pooling/pooling/openai_pooling_client.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Example online usage of Pooling API.

Run `vllm serve <model> --runner pooling`
to start up the server in vLLM. e.g.

vllm serve internlm/internlm2-1_8b-reward --trust-remote-code
"""

import argparse
import pprint

import requests


def post_http_request(prompt: dict, api_url: str) -> requests.Response:
    """POST `prompt` as JSON to `api_url` and return the raw response."""
    return requests.post(api_url, headers={"User-Agent": "Test Client"}, json=prompt)


def parse_args():
    """Command-line options: server location and model name."""
    p = argparse.ArgumentParser()
    p.add_argument("--host", type=str, default="localhost")
    p.add_argument("--port", type=int, default=8000)
    p.add_argument("--model", type=str, default="internlm/internlm2-1_8b-reward")
    return p.parse_args()


def main(args):
    api_url = f"http://{args.host}:{args.port}/pooling"
    model_name = args.model
    sep = "-" * 50

    # Completions-style input: plain string under "input".
    completion_style = {"model": model_name, "input": "vLLM is great!"}
    response = post_http_request(prompt=completion_style, api_url=api_url)
    print(sep)
    print("Pooling Response:")
    pprint.pprint(response.json())
    print(sep)

    # Chat-style input: structured messages, as in the Chat API.
    chat_style = {
        "model": model_name,
        "messages": [
            {
                "role": "user",
                "content": [{"type": "text", "text": "vLLM is great!"}],
            }
        ],
    }
    response = post_http_request(prompt=chat_style, api_url=api_url)
    print("Pooling Response:")
    pprint.pprint(response.json())
    print(sep)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/pooling/vision_language_pooling.py
examples/pooling/pooling/vision_language_pooling.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This example shows how to use vLLM for running offline inference with
the correct prompt format on vision language models for multimodal pooling.

For most models, the prompt format should follow corresponding examples
on HuggingFace model repository.
"""

from argparse import Namespace
from dataclasses import asdict
from pathlib import Path
from typing import Literal, NamedTuple, TypeAlias, TypedDict, get_args

from PIL.Image import Image

from vllm import LLM, EngineArgs
from vllm.entrypoints.score_utils import ScoreMultiModalParam
from vllm.multimodal.utils import fetch_image
from vllm.utils.argparse_utils import FlexibleArgumentParser

# Three levels up from this file; presumably the repository root with an
# `examples/` directory alongside -- TODO confirm against the repo layout.
ROOT_DIR = Path(__file__).parent.parent.parent
EXAMPLES_DIR = ROOT_DIR / "examples"


# Tagged query variants; `modality` is the discriminator used by the
# run_* functions below.
class TextQuery(TypedDict):
    modality: Literal["text"]
    text: str


class ImageQuery(TypedDict):
    modality: Literal["image"]
    image: Image


class TextImageQuery(TypedDict):
    modality: Literal["text+image"]
    text: str
    image: Image


class TextImagesQuery(TypedDict):
    modality: Literal["text+images"]
    text: str
    image: ScoreMultiModalParam


QueryModality = Literal["text", "image", "text+image", "text+images"]
Query: TypeAlias = TextQuery | ImageQuery | TextImageQuery | TextImagesQuery


class ModelRequestData(NamedTuple):
    # Engine config plus either (prompt, image) for embedding models or
    # (query, documents) for rerankers; unused fields stay None.
    engine_args: EngineArgs
    prompt: str | None = None
    image: Image | None = None
    query: str | None = None
    documents: ScoreMultiModalParam | None = None


def run_clip(query: Query) -> ModelRequestData:
    """Build an embedding request for CLIP (text or image, not both)."""
    if query["modality"] == "text":
        prompt = query["text"]
        image = None
    elif query["modality"] == "image":
        prompt = ""  # For image input, make sure that the prompt text is empty
        image = query["image"]
    else:
        modality = query["modality"]
        raise ValueError(f"Unsupported query modality: '{modality}'")

    engine_args = EngineArgs(
        model="openai/clip-vit-base-patch32",
        runner="pooling",
        limit_mm_per_prompt={"image": 1},
    )

    return ModelRequestData(
        engine_args=engine_args,
        prompt=prompt,
        image=image,
    )


def run_e5_v(query: Query) -> ModelRequestData:
    """Build an embedding request for e5-v using its Llama-3 chat template."""
    llama3_template = "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n \n"  # noqa: E501

    if query["modality"] == "text":
        text = query["text"]
        prompt = llama3_template.format(f"{text}\nSummary above sentence in one word: ")
        image = None
    elif query["modality"] == "image":
        prompt = llama3_template.format("<image>\nSummary above image in one word: ")
        image = query["image"]
    else:
        modality = query["modality"]
        raise ValueError(f"Unsupported query modality: '{modality}'")

    engine_args = EngineArgs(
        model="royokong/e5-v",
        runner="pooling",
        max_model_len=4096,
        limit_mm_per_prompt={"image": 1},
    )

    return ModelRequestData(
        engine_args=engine_args,
        prompt=prompt,
        image=image,
    )


def run_jinavl_reranker(query: Query) -> ModelRequestData:
    """Build a scoring request for the Jina VL reranker (text+images only)."""
    if query["modality"] != "text+images":
        raise ValueError(f"Unsupported query modality: '{query['modality']}'")

    engine_args = EngineArgs(
        model="jinaai/jina-reranker-m0",
        runner="pooling",
        max_model_len=32768,
        trust_remote_code=True,
        mm_processor_kwargs={
            "min_pixels": 3136,
            "max_pixels": 602112,
        },
        limit_mm_per_prompt={"image": 1},
    )

    return ModelRequestData(
        engine_args=engine_args,
        query=query["text"],
        documents=query["image"],
    )


def run_siglip(query: Query) -> ModelRequestData:
    """Build an embedding request for SigLIP (text or image, not both)."""
    if query["modality"] == "text":
        prompt = query["text"]
        image = None
    elif query["modality"] == "image":
        prompt = ""  # For image input, make sure that the prompt text is empty
        image = query["image"]
    else:
        modality = query["modality"]
        raise ValueError(f"Unsupported query modality: '{modality}'")

    engine_args = EngineArgs(
        model="google/siglip-base-patch16-224",
        runner="pooling",
        limit_mm_per_prompt={"image": 1},
    )

    return ModelRequestData(
        engine_args=engine_args,
        prompt=prompt,
        image=image,
    )


def _get_vlm2vec_prompt_image(query: Query, image_token: str):
    """Return the (prompt, image) pair for VLM2Vec given the model's image token."""
    if query["modality"] == "text":
        text = query["text"]
        prompt = f"Find me an everyday image that matches the given caption: {text}"
        image = None
    elif query["modality"] == "image":
        prompt = f"{image_token} Find a day-to-day image that looks similar to the provided image."  # noqa: E501
        image = query["image"]
    elif query["modality"] == "text+image":
        text = query["text"]
        prompt = f"{image_token} Represent the given image with the following question: {text}"  # noqa: E501
        image = query["image"]
    else:
        modality = query["modality"]
        raise ValueError(f"Unsupported query modality: {modality!r}")

    return prompt, image


def run_vlm2vec_phi3v(query: Query) -> ModelRequestData:
    """Build an embedding request for the Phi-3-V variant of VLM2Vec."""
    prompt, image = _get_vlm2vec_prompt_image(query, "<|image_1|>")

    engine_args = EngineArgs(
        model="TIGER-Lab/VLM2Vec-Full",
        runner="pooling",
        max_model_len=4096,
        trust_remote_code=True,
        mm_processor_kwargs={"num_crops": 4},
        limit_mm_per_prompt={"image": 1},
    )

    return ModelRequestData(
        engine_args=engine_args,
        prompt=prompt,
        image=image,
    )


def run_vlm2vec_qwen2vl(query: Query) -> ModelRequestData:
    """Merge the Qwen2-VL VLM2Vec LoRA into the base model, save it locally,
    and build an embedding request pointing at the merged checkpoint."""
    # vLLM does not support LoRA adapters on multi-modal encoder,
    # so we merge the weights first
    from huggingface_hub.constants import HF_HUB_CACHE
    from peft import PeftConfig, PeftModel
    from transformers import AutoModelForImageTextToText, AutoProcessor

    from vllm.entrypoints.chat_utils import load_chat_template

    model_id = "TIGER-Lab/VLM2Vec-Qwen2VL-2B"

    base_model = AutoModelForImageTextToText.from_pretrained(model_id)
    lora_model = PeftModel.from_pretrained(
        base_model,
        model_id,
        config=PeftConfig.from_pretrained(model_id),
    )
    model = lora_model.merge_and_unload().to(dtype=base_model.dtype)
    model._hf_peft_config_loaded = False  # Needed to save the merged model

    processor = AutoProcessor.from_pretrained(
        model_id,
        # `min_pixels` and `max_pixels` are deprecated for
        # transformers `preprocessor_config.json`
        size={"shortest_edge": 3136, "longest_edge": 12845056},
    )
    processor.chat_template = load_chat_template(
        # The original chat template is not correct
        EXAMPLES_DIR / "template_vlm2vec_qwen2vl.jinja",
    )

    # Store the merged model next to the HF hub cache, suffixed "-vllm".
    merged_path = str(
        Path(HF_HUB_CACHE) / ("models--" + model_id.replace("/", "--") + "-vllm")
    )
    print(f"Saving merged model to {merged_path}...")
    print(
        "NOTE: This directory is not tracked by `huggingface_hub` "
        "so you have to delete this manually if you don't want it anymore."
    )
    model.save_pretrained(merged_path)
    processor.save_pretrained(merged_path)
    print("Done!")

    prompt, image = _get_vlm2vec_prompt_image(query, "<|image_pad|>")

    engine_args = EngineArgs(
        model=merged_path,
        runner="pooling",
        max_model_len=4096,
        mm_processor_kwargs={
            "min_pixels": 3136,
            "max_pixels": 12845056,
        },
        limit_mm_per_prompt={"image": 1},
    )

    return ModelRequestData(
        engine_args=engine_args,
        prompt=prompt,
        image=image,
    )


def get_query(modality: QueryModality):
    """Return a canned sample query for the requested modality."""
    if modality == "text":
        return TextQuery(modality="text", text="A dog sitting in the grass")

    if modality == "image":
        return ImageQuery(
            modality="image",
            image=fetch_image(
                "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/eskimo.jpg"  # noqa: E501
            ),
        )

    if modality == "text+image":
        return TextImageQuery(
            modality="text+image",
            text="A cat standing in the snow.",
            image=fetch_image(
                "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/cat_snow.jpg"  # noqa: E501
            ),
        )

    if modality == "text+images":
        return TextImagesQuery(
            modality="text+images",
            text="slm markdown",
            image={
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://raw.githubusercontent.com/jina-ai/multimodal-reranker-test/main/handelsblatt-preview.png"
                        },
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://raw.githubusercontent.com/jina-ai/multimodal-reranker-test/main/paper-11.png"
                        },
                    },
                ]
            },
        )

    msg = f"Modality {modality} is not supported."
    raise ValueError(msg)


def run_encode(model: str, modality: QueryModality, seed: int):
    """Run the embedding task for `model` on a sample query and print vectors."""
    query = get_query(modality)
    req_data = model_example_map[model](query)

    # Disable other modalities to save memory
    default_limits = {"image": 0, "video": 0, "audio": 0}
    req_data.engine_args.limit_mm_per_prompt = default_limits | dict(
        req_data.engine_args.limit_mm_per_prompt or {}
    )

    engine_args = asdict(req_data.engine_args) | {"seed": seed}
    llm = LLM(**engine_args)

    mm_data = {}
    if req_data.image is not None:
        mm_data["image"] = req_data.image

    outputs = llm.embed(
        {
            "prompt": req_data.prompt,
            "multi_modal_data": mm_data,
        }
    )

    print("-" * 50)
    for output in outputs:
        print(output.outputs.embedding)
        print("-" * 50)


def run_score(model: str, modality: QueryModality, seed: int):
    """Run the scoring task for `model` on a sample query and print scores."""
    query = get_query(modality)
    req_data = model_example_map[model](query)

    engine_args = asdict(req_data.engine_args) | {"seed": seed}
    llm = LLM(**engine_args)

    outputs = llm.score(req_data.query, req_data.documents)

    print("-" * 30)
    print([output.outputs.score for output in outputs])
    print("-" * 30)


# Dispatch table from CLI model name to request builder.
model_example_map = {
    "clip": run_clip,
    "e5_v": run_e5_v,
    "jinavl_reranker": run_jinavl_reranker,
    "siglip": run_siglip,
    "vlm2vec_phi3v": run_vlm2vec_phi3v,
    "vlm2vec_qwen2vl": run_vlm2vec_qwen2vl,
}


def parse_args():
    """Command-line options: model, task (embedding/scoring), modality, seed."""
    parser = FlexibleArgumentParser(
        description="Demo on using vLLM for offline inference with "
        "vision language models for multimodal pooling tasks."
    )
    parser.add_argument(
        "--model-name",
        "-m",
        type=str,
        default="vlm2vec_phi3v",
        choices=model_example_map.keys(),
        help="The name of the embedding model.",
    )
    parser.add_argument(
        "--task",
        "-t",
        type=str,
        default="embedding",
        choices=["embedding", "scoring"],
        help="The task type.",
    )
    parser.add_argument(
        "--modality",
        type=str,
        default="image",
        choices=get_args(QueryModality),
        help="Modality of the input.",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=0,
        help="Set the seed when initializing `vllm.LLM`.",
    )
    return parser.parse_args()


def main(args: Namespace):
    """Dispatch to the embedding or scoring runner based on --task."""
    if args.task == "embedding":
        run_encode(args.model_name, args.modality, args.seed)
    elif args.task == "scoring":
        run_score(args.model_name, args.modality, args.seed)
    else:
        raise ValueError(f"Unsupported task: {args.task}")


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/embedding_requests_base64_client.py
examples/pooling/embed/embedding_requests_base64_client.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Example Python client for embedding API using vLLM API server
NOTE:
    start a supported embeddings model server with `vllm serve`, e.g.
    vllm serve intfloat/e5-small
"""

import argparse
import base64

import requests
import torch

from vllm.utils.serial_utils import (
    EMBED_DTYPE_TO_TORCH_DTYPE,
    ENDIANNESS,
    binary2tensor,
)


def post_http_request(prompt: dict, api_url: str) -> requests.Response:
    """POST `prompt` as JSON to `api_url` and return the raw response."""
    return requests.post(api_url, headers={"User-Agent": "Test Client"}, json=prompt)


def parse_args():
    """Command-line options: server location and model name."""
    p = argparse.ArgumentParser()
    p.add_argument("--host", type=str, default="localhost")
    p.add_argument("--port", type=int, default=8000)
    p.add_argument("--model", type=str, default="intfloat/e5-small")
    return p.parse_args()


def main(args):
    api_url = f"http://{args.host}:{args.port}/v1/embeddings"
    model_name = args.model

    # Try every supported dtype/endianness combination. These parameters are
    # vLLM extensions; the OpenAI client does not support embed_dtype or
    # endianness, hence the raw HTTP requests.
    for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
        for endianness in ENDIANNESS:
            payload = {
                "model": model_name,
                "input": "vLLM is great!",
                "encoding_format": "base64",
                "embed_dtype": embed_dtype,
                "endianness": endianness,
            }
            response = post_http_request(prompt=payload, api_url=api_url)

            # Each item carries a base64-encoded binary tensor; decode and
            # widen to float32 before concatenating.
            tensors = [
                binary2tensor(
                    base64.b64decode(item["embedding"]), (-1,), embed_dtype, endianness
                ).to(torch.float32)
                for item in response.json()["data"]
            ]
            embedding = torch.cat(tensors)
            print(embed_dtype, endianness, embedding.shape)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/embed_jina_embeddings_v3.py
examples/pooling/embed/embed_jina_embeddings_v3.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from argparse import Namespace

from vllm import LLM, EngineArgs
from vllm.utils.argparse_utils import FlexibleArgumentParser


def parse_args():
    """Expose all EngineArgs on the CLI, defaulting to jina-embeddings-v3."""
    parser = FlexibleArgumentParser()
    parser = EngineArgs.add_cli_args(parser)
    parser.set_defaults(
        model="jinaai/jina-embeddings-v3",
        runner="pooling",
        trust_remote_code=True,
    )
    return parser.parse_args()


def main(args: Namespace):
    """Embed a few multilingual prompts and print truncated vectors."""
    prompts = [
        "Follow the white rabbit.",  # English
        "Sigue al conejo blanco.",  # Spanish
        "Suis le lapin blanc.",  # French
        "跟着白兔走。",  # Chinese
        "اتبع الأرنب الأبيض.",  # Arabic
        "Folge dem weißen Kaninchen.",  # German
    ]

    # runner="pooling" (set via defaults) selects the embedding pathway.
    llm = LLM(**vars(args))

    # Returns one EmbeddingRequestOutput per prompt.
    # Only text matching task is supported for now. See #16120
    outputs = llm.embed(prompts)

    print("\nGenerated Outputs:")
    print("Only text matching task is supported for now. See #16120")
    print("-" * 60)
    for prompt, output in zip(prompts, outputs):
        vec = output.outputs.embedding
        # Show only the first 16 components of long vectors.
        if len(vec) > 16:
            preview = str(vec[:16])[:-1] + ", ...]"
        else:
            preview = vec
        print(
            f"Prompt: {prompt!r} \n"
            f"Embeddings for text matching: {preview} "
            f"(size={len(vec)})"
        )
        print("-" * 60)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/openai_chat_embedding_client_for_multimodal.py
examples/pooling/embed/openai_chat_embedding_client_for_multimodal.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""Example Python client for multimodal embedding API using vLLM API server.

Refer to each `run_*` function for the command to run the server for that model.
"""

import argparse
import base64
import io
from typing import Literal

from openai import OpenAI
from openai._types import NOT_GIVEN, NotGiven
from openai.types.chat import ChatCompletionMessageParam
from openai.types.create_embedding_response import CreateEmbeddingResponse
from PIL import Image

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

# Sample image shared by all `run_*` demos below.
image_url = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"


def create_chat_embeddings(
    client: OpenAI,
    *,
    messages: list[ChatCompletionMessageParam],
    model: str,
    encoding_format: Literal["base64", "float"] | NotGiven = NOT_GIVEN,
) -> CreateEmbeddingResponse:
    """
    Convenience function for accessing vLLM's Chat Embeddings API,
    which is an extension of OpenAI's existing Embeddings API.
    """
    return client.post(
        "/embeddings",
        cast_to=CreateEmbeddingResponse,
        body={"messages": messages, "model": model, "encoding_format": encoding_format},
    )


def run_clip(client: OpenAI, model: str):
    """
    Start the server using:

    vllm serve openai/clip-vit-base-patch32 \
        --runner pooling
    """
    # CLIP embeds an image alone...
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Image embedding output:", response.data[0].embedding)

    # ...or a text caption alone, into the same vector space.
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "a photo of a cat"},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Text embedding output:", response.data[0].embedding)


def run_dse_qwen2_vl(client: OpenAI, model: str):
    """
    Start the server using:

    vllm serve MrLight/dse-qwen2-2b-mrl-v1 \
        --runner pooling \
        --trust-remote-code \
        --max-model-len 8192 \
        --chat-template examples/template_dse_qwen2_vl.jinja
    """
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_url,
                        },
                    },
                    {"type": "text", "text": "What is shown in this image?"},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Image embedding output:", response.data[0].embedding)

    # MrLight/dse-qwen2-2b-mrl-v1 requires a placeholder image
    # of the minimum input size
    buffer = io.BytesIO()
    image_placeholder = Image.new("RGB", (56, 56))
    image_placeholder.save(buffer, "png")
    buffer.seek(0)
    image_placeholder = base64.b64encode(buffer.read()).decode("utf-8")
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            # The buffer holds PNG bytes (saved above), so the
                            # data URL must declare image/png (was image/jpeg).
                            "url": f"data:image/png;base64,{image_placeholder}",
                        },
                    },
                    {"type": "text", "text": "Query: What is the weather like today?"},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Text embedding output:", response.data[0].embedding)


def run_siglip(client: OpenAI, model: str):
    """
    Start the server using:

    vllm serve google/siglip-base-patch16-224 \
        --runner pooling \
        --chat-template template_basic.jinja
    """
    # Image-only embedding.
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Image embedding output:", response.data[0].embedding)

    # Text-only embedding.
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "a photo of a cat"},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Text embedding output:", response.data[0].embedding)


def run_vlm2vec(client: OpenAI, model: str):
    """
    Start the server using:

    vllm serve TIGER-Lab/VLM2Vec-Full \
        --runner pooling \
        --trust-remote-code \
        --max-model-len 4096 \
        --chat-template examples/template_vlm2vec_phi3v.jinja
    """
    # Image with a generic "represent" instruction.
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}},
                    {"type": "text", "text": "Represent the given image."},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Image embedding output:", response.data[0].embedding)

    # Image conditioned on a question.
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}},
                    {
                        "type": "text",
                        "text": "Represent the given image with the following question: What is in the image.",
                    },
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Image+Text embedding output:", response.data[0].embedding)

    # Text-only embedding.
    response = create_chat_embeddings(
        client,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "A cat and a dog"},
                ],
            }
        ],
        model=model,
        encoding_format="float",
    )

    print("Text embedding output:", response.data[0].embedding)


# Dispatch table from CLI model name to demo runner.
model_example_map = {
    "clip": run_clip,
    "dse_qwen2_vl": run_dse_qwen2_vl,
    "siglip": run_siglip,
    "vlm2vec": run_vlm2vec,
}


def parse_args():
    # The help text was previously passed positionally, which argparse
    # interprets as `prog` (the program name) — pass it as `description`.
    parser = argparse.ArgumentParser(
        description=(
            "Script to call a specified VLM through the API. Make sure to serve "
            "the model with `--runner pooling` before running this."
        )
    )
    parser.add_argument(
        "--model",
        type=str,
        choices=model_example_map.keys(),
        required=True,
        help="The name of the embedding model.",
    )
    return parser.parse_args()


def main(args):
    """Connect to the local vLLM server and run the selected model's demo."""
    client = OpenAI(
        # defaults to os.environ.get("OPENAI_API_KEY")
        api_key=openai_api_key,
        base_url=openai_api_base,
    )

    models = client.models.list()
    model_id = models.data[0].id

    model_example_map[args.model](client, model_id)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/embed_matryoshka_fy.py
examples/pooling/embed/embed_matryoshka_fy.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from argparse import Namespace

from vllm import LLM, EngineArgs, PoolingParams
from vllm.utils.argparse_utils import FlexibleArgumentParser


def parse_args():
    """Expose all EngineArgs on the CLI, defaulting to jina-embeddings-v3."""
    parser = FlexibleArgumentParser()
    parser = EngineArgs.add_cli_args(parser)
    parser.set_defaults(
        model="jinaai/jina-embeddings-v3",
        runner="pooling",
        trust_remote_code=True,
    )
    return parser.parse_args()


def main(args: Namespace):
    """Embed multilingual prompts with Matryoshka-truncated 32-d vectors."""
    prompts = [
        "Follow the white rabbit.",  # English
        "Sigue al conejo blanco.",  # Spanish
        "Suis le lapin blanc.",  # French
        "跟着白兔走。",  # Chinese
        "اتبع الأرنب الأبيض.",  # Arabic
        "Folge dem weißen Kaninchen.",  # German
    ]

    # runner="pooling" (set via defaults) selects the embedding pathway.
    llm = LLM(**vars(args))

    # `dimensions` asks the Matryoshka model for truncated 32-d embeddings.
    outputs = llm.embed(prompts, pooling_params=PoolingParams(dimensions=32))

    print("\nGenerated Outputs:")
    print("-" * 60)
    for prompt, output in zip(prompts, outputs):
        vec = output.outputs.embedding
        # Show only the first 16 components of long vectors.
        if len(vec) > 16:
            preview = str(vec[:16])[:-1] + ", ...]"
        else:
            preview = vec
        print(f"Prompt: {prompt!r} \nEmbeddings: {preview} (size={len(vec)})")
        print("-" * 60)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/embedding_requests_bytes_client.py
examples/pooling/embed/embedding_requests_bytes_client.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Example Python client for embedding API using vLLM API server
NOTE:
    start a supported embeddings model server with `vllm serve`, e.g.
    vllm serve intfloat/e5-small
"""

import argparse
import json

import requests
import torch

from vllm.utils.serial_utils import (
    EMBED_DTYPE_TO_TORCH_DTYPE,
    ENDIANNESS,
    MetadataItem,
    build_metadata_items,
    decode_pooling_output,
)


def post_http_request(prompt: dict, api_url: str) -> requests.Response:
    """POST `prompt` as JSON to `api_url` and return the raw response."""
    return requests.post(api_url, headers={"User-Agent": "Test Client"}, json=prompt)


def parse_args():
    """Command-line options: server location and model name."""
    p = argparse.ArgumentParser()
    p.add_argument("--host", type=str, default="localhost")
    p.add_argument("--port", type=int, default=8000)
    p.add_argument("--model", type=str, default="intfloat/e5-small")
    return p.parse_args()


def main(args):
    api_url = f"http://{args.host}:{args.port}/v1/embeddings"
    model_name = args.model

    embedding_size = 0
    input_texts = [
        "The best thing about vLLM is that it supports many different models",
    ] * 2

    # First pass: "bytes" encoding. The response carries a JSON `metadata`
    # header describing each tensor in the binary body. These parameters are
    # vLLM extensions; the OpenAI client supports neither the bytes
    # encoding_format nor embed_dtype/endianness.
    for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
        for endianness in ENDIANNESS:
            payload = {
                "model": model_name,
                "input": input_texts,
                "encoding_format": "bytes",
                "embed_dtype": embed_dtype,
                "endianness": endianness,
            }
            response = post_http_request(prompt=payload, api_url=api_url)

            metadata = json.loads(response.headers["metadata"])
            items = [MetadataItem(**x) for x in metadata["data"]]
            decoded = decode_pooling_output(items=items, body=response.content)
            embedding = torch.stack([t.to(torch.float32) for t in decoded])
            embedding_size = embedding.shape[-1]
            print(embed_dtype, endianness, embedding.shape)

    # Second pass: "bytes_only". The server always returns embeddings in
    # request order, so the metadata is redundant; this format omits it and
    # the client rebuilds it locally from the known embedding size.
    for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
        for endianness in ENDIANNESS:
            payload = {
                "model": model_name,
                "input": input_texts,
                "encoding_format": "bytes_only",
                "embed_dtype": embed_dtype,
                "endianness": endianness,
            }
            response = post_http_request(prompt=payload, api_url=api_url)

            items = build_metadata_items(
                embed_dtype=embed_dtype,
                endianness=endianness,
                shape=(embedding_size,),
                n_request=len(input_texts),
            )
            decoded = decode_pooling_output(items=items, body=response.content)
            embedding = torch.stack([t.to(torch.float32) for t in decoded])
            print(embed_dtype, endianness, embedding.shape)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/openai_embedding_matryoshka_fy.py
examples/pooling/embed/openai_embedding_matryoshka_fy.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Example Python client for embedding API dimensions using vLLM API server NOTE: start a supported Matryoshka Embeddings model server with `vllm serve`, e.g. vllm serve jinaai/jina-embeddings-v3 --trust-remote-code """ from openai import OpenAI # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" def main(): client = OpenAI( # defaults to os.environ.get("OPENAI_API_KEY") api_key=openai_api_key, base_url=openai_api_base, ) models = client.models.list() model = models.data[0].id responses = client.embeddings.create( input=["Follow the white rabbit."], model=model, dimensions=32, ) for data in responses.data: print(data.embedding) # List of float of len 32 if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/openai_embedding_client.py
examples/pooling/embed/openai_embedding_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Example Python client for embedding API using vLLM API server NOTE: start a supported embeddings model server with `vllm serve`, e.g. vllm serve intfloat/e5-small """ from openai import OpenAI # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" def main(): client = OpenAI( # defaults to os.environ.get("OPENAI_API_KEY") api_key=openai_api_key, base_url=openai_api_base, ) models = client.models.list() model = models.data[0].id responses = client.embeddings.create( # ruff: noqa: E501 input=[ "Hello my name is", "The best thing about vLLM is that it supports many different models", ], model=model, ) for data in responses.data: print(data.embedding) # List of float of len 4096 if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/embed/openai_embedding_long_text/client.py
examples/pooling/embed/openai_embedding_long_text/client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Example script demonstrating long text embedding with chunked processing in vLLM. This example shows how to use vLLM's chunked processing feature to handle text inputs that exceed the model's maximum token length. The feature automatically splits long text into chunks and handles different pooling types optimally. Prerequisites: 1. Start vLLM server with chunked processing enabled: # MEAN pooling (processes all chunks, recommended for complete coverage) vllm serve intfloat/multilingual-e5-large \ --pooler-config \ '{"pooling_type": "MEAN", "normalize": true, ' \ '"enable_chunked_processing": true, "max_embed_len": 3072000}' \ --served-model-name multilingual-e5-large \ --trust-remote-code \ --port 31090 \ --api-key your-api-key # OR CLS pooling (native CLS within chunks, MEAN aggregation across chunks) vllm serve BAAI/bge-large-en-v1.5 \ --pooler-config \ '{"pooling_type": "CLS", "normalize": true, ' \ '"enable_chunked_processing": true, "max_embed_len": 1048576}' \ --served-model-name bge-large-en-v1.5 \ --trust-remote-code \ --port 31090 \ --api-key your-api-key 2. 
Install required dependencies: pip install openai requests """ import time import numpy as np from openai import OpenAI # Configuration API_KEY = "your-api-key" # Replace with your actual API key BASE_URL = "http://localhost:31090/v1" MODEL_NAME = "multilingual-e5-large" def generate_long_text(base_text: str, repeat_count: int) -> str: """Generate long text by repeating base text.""" return base_text * repeat_count def test_embedding_with_different_lengths(): """Test embedding generation with different text lengths.""" client = OpenAI(api_key=API_KEY, base_url=BASE_URL) # Test cases with different text lengths test_cases = [ { "name": "Short Text", "text": "Hello, this is a short text for embedding.", "expected_chunks": 1, }, { "name": "Medium Text", "text": generate_long_text( "This is a medium-length text that should fit within the " "model's context window. " * 20, 2, ), "expected_chunks": 1, }, { "name": "Long Text (2 chunks)", "text": generate_long_text( "This is a very long text that will exceed the model's " "maximum context length and trigger chunked processing. " * 50, 5, ), "expected_chunks": 2, }, { "name": "Very Long Text (3+ chunks)", "text": generate_long_text( "This text is extremely long and will definitely " "require multiple chunks for processing. 
" * 100, 10, ), "expected_chunks": 3, }, ] print("🧪 Testing vLLM Long Text Embedding with Chunked Processing") print("=" * 70) for i, test_case in enumerate(test_cases, 1): print(f"\n📝 Test {i}: {test_case['name']}") print(f"Text length: {len(test_case['text'])} characters") try: start_time = time.time() response = client.embeddings.create( input=test_case["text"], model=MODEL_NAME, encoding_format="float" ) end_time = time.time() processing_time = end_time - start_time # Extract embedding data embedding = response.data[0].embedding embedding_dim = len(embedding) print("✅ Success!") print(f" - Embedding dimension: {embedding_dim}") print(f" - Processing time: {processing_time:.2f}s") print(f" - Expected chunks: ~{test_case['expected_chunks']}") print(f" - First 5 values: {embedding[:5]}") except Exception as e: print(f"❌ Failed: {str(e)}") def test_batch_embedding(): """Test batch embedding with mixed-length inputs.""" client = OpenAI(api_key=API_KEY, base_url=BASE_URL) print("\n🔄 Testing Batch Embedding with Mixed Lengths") print("=" * 50) # Mix of short and long texts batch_inputs = [ "Short text 1", generate_long_text("Medium length text that fits in one chunk. " * 20, 1), "Another short text", generate_long_text("Long text requiring chunked processing. 
" * 100, 5), ] try: start_time = time.time() response = client.embeddings.create( input=batch_inputs, model=MODEL_NAME, encoding_format="float" ) end_time = time.time() processing_time = end_time - start_time print("✅ Batch processing successful!") print(f" - Number of inputs: {len(batch_inputs)}") print(f" - Number of embeddings: {len(response.data)}") print(f" - Total processing time: {processing_time:.2f}s") print( f" - Average time per input: {processing_time / len(batch_inputs):.2f}s" ) for i, data in enumerate(response.data): input_length = len(batch_inputs[i]) embedding_dim = len(data.embedding) print( f" - Input {i + 1}: {input_length} chars → {embedding_dim}D embedding" ) except Exception as e: print(f"❌ Batch processing failed: {str(e)}") def test_multiple_long_texts_batch(): """Test batch processing with multiple long texts to verify chunk ID uniqueness.""" client = OpenAI(api_key=API_KEY, base_url=BASE_URL) print("\n🔧 Testing Multiple Long Texts in Batch (Chunk ID Fix Verification)") print("=" * 70) # Create multiple distinct long texts that will all require chunking # Note: All pooling types now use MEAN aggregation across chunks: # - Native pooling (MEAN/CLS/LAST) is used within each chunk # - MEAN aggregation combines results across all chunks # - Full semantic coverage for all pooling types long_texts = [ generate_long_text( "First long document about artificial intelligence and machine learning. " * 80, 6, ), generate_long_text( "Second long document about natural language processing and transformers. " * 80, 6, ), generate_long_text( "Third long document about computer vision and neural networks. 
" * 80, 6 ), ] # Add some short texts to mix things up batch_inputs = [ "Short text before long texts", long_texts[0], "Short text between long texts", long_texts[1], long_texts[2], "Short text after long texts", ] print("📊 Batch composition:") for i, text in enumerate(batch_inputs): length = len(text) text_type = "Long (will be chunked)" if length > 5000 else "Short" print(f" - Input {i + 1}: {length} chars ({text_type})") try: start_time = time.time() response = client.embeddings.create( input=batch_inputs, model=MODEL_NAME, encoding_format="float" ) end_time = time.time() processing_time = end_time - start_time print("\n✅ Multiple long texts batch processing successful!") print(f" - Number of inputs: {len(batch_inputs)}") print(f" - Number of embeddings returned: {len(response.data)}") print(f" - Total processing time: {processing_time:.2f}s") # Verify each embedding is different (no incorrect aggregation) embeddings = [data.embedding for data in response.data] if len(embeddings) >= 3: import numpy as np # Compare embeddings of the long texts (indices 1, 3, 4) long_embeddings = [ np.array(embeddings[1]), # First long text np.array(embeddings[3]), # Second long text np.array(embeddings[4]), # Third long text ] print("\n🔍 Verifying embedding uniqueness:") for i in range(len(long_embeddings)): for j in range(i + 1, len(long_embeddings)): cosine_sim = np.dot(long_embeddings[i], long_embeddings[j]) / ( np.linalg.norm(long_embeddings[i]) * np.linalg.norm(long_embeddings[j]) ) print( f" - Similarity between long text {i + 1} and {j + 1}: " f"{cosine_sim:.4f}" ) if ( cosine_sim < 0.9 ): # Different content should have lower similarity print(" ✅ Good: Embeddings are appropriately different") else: print( " ⚠️ High similarity - may indicate chunk " "aggregation issue" ) print("\n📋 Per-input results:") for i, data in enumerate(response.data): input_length = len(batch_inputs[i]) embedding_dim = len(data.embedding) embedding_norm = np.linalg.norm(data.embedding) print( f" - 
Input {i + 1}: {input_length} chars → {embedding_dim}D " f"embedding (norm: {embedding_norm:.4f})" ) print( "\n✅ This test verifies the fix for chunk ID collisions in " "batch processing" ) print(" - Before fix: Multiple long texts would have conflicting chunk IDs") print(" - After fix: Each prompt's chunks have unique IDs with prompt index") except Exception as e: print(f"❌ Multiple long texts batch test failed: {str(e)}") print(" This might indicate the chunk ID collision bug is present!") def test_embedding_consistency(): """Test that chunked processing produces consistent results.""" client = OpenAI(api_key=API_KEY, base_url=BASE_URL) print("\n🔍 Testing Embedding Consistency") print("=" * 40) # Use the same long text multiple times long_text = generate_long_text( "Consistency test text for chunked processing validation. " * 50, 3 ) embeddings = [] try: for i in range(3): response = client.embeddings.create( input=long_text, model=MODEL_NAME, encoding_format="float" ) embeddings.append(response.data[0].embedding) print(f" - Generated embedding {i + 1}") # Check consistency (embeddings should be identical) if len(embeddings) >= 2: # Calculate similarity between first two embeddings emb1 = np.array(embeddings[0]) emb2 = np.array(embeddings[1]) # Cosine similarity cosine_sim = np.dot(emb1, emb2) / ( np.linalg.norm(emb1) * np.linalg.norm(emb2) ) print("✅ Consistency test completed!") print(f" - Cosine similarity between runs: {cosine_sim:.6f}") print(" - Expected: ~1.0 (identical embeddings)") if cosine_sim > 0.999: print(" - ✅ High consistency achieved!") else: print(" - ⚠️ Consistency may vary due to numerical precision") except Exception as e: print(f"❌ Consistency test failed: {str(e)}") def main(): """Main function to run all tests.""" print("🚀 vLLM Long Text Embedding Client") print(f"📡 Connecting to: {BASE_URL}") print(f"🤖 Model: {MODEL_NAME}") masked_key = "*" * (len(API_KEY) - 4) + API_KEY[-4:] if len(API_KEY) > 4 else "****" print(f"🔑 API Key: 
{masked_key}") # Run all test cases test_embedding_with_different_lengths() test_batch_embedding() test_multiple_long_texts_batch() test_embedding_consistency() print("\n" + "=" * 70) print("🎉 All tests completed!") print("\n💡 Key Features Demonstrated:") print(" - ✅ Automatic chunked processing for long text") print(" - ✅ Seamless handling of mixed-length batches") print(" - ✅ Multiple long texts in single batch (chunk ID fix)") print(" - ✅ Unified chunked processing:") print(" • Native pooling used within each chunk") print(" • MEAN aggregation across all chunks") print(" • Complete semantic coverage for all pooling types") print(" - ✅ Consistent embedding generation") print(" - ✅ Backward compatibility with short text") print("\n📚 For more information, see:") print( " - Documentation: https://docs.vllm.ai/en/latest/models/pooling_models.html" ) print(" - Chunked Processing Guide: openai_embedding_long_text.md") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/token_classify/ner_client.py
examples/pooling/token_classify/ner_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # Adapted from https://huggingface.co/boltuix/NeuroBERT-NER """ Example online usage of Pooling API for Named Entity Recognition (NER). Run `vllm serve <model> --runner pooling` to start up the server in vLLM. e.g. vllm serve boltuix/NeuroBERT-NER """ import argparse import requests import torch def post_http_request(prompt: dict, api_url: str) -> requests.Response: headers = {"User-Agent": "Test Client"} response = requests.post(api_url, headers=headers, json=prompt) return response def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--port", type=int, default=8000) parser.add_argument("--model", type=str, default="boltuix/NeuroBERT-NER") return parser.parse_args() def main(args): from transformers import AutoConfig, AutoTokenizer api_url = f"http://{args.host}:{args.port}/pooling" model_name = args.model # Load tokenizer and config tokenizer = AutoTokenizer.from_pretrained(model_name) config = AutoConfig.from_pretrained(model_name) label_map = config.id2label # Input text text = "Barack Obama visited Microsoft headquarters in Seattle on January 2025." prompt = {"model": model_name, "input": text} pooling_response = post_http_request(prompt=prompt, api_url=api_url) # Run inference output = pooling_response.json()["data"][0] logits = torch.tensor(output["data"]) predictions = logits.argmax(dim=-1) inputs = tokenizer(text, return_tensors="pt") # Map predictions to labels tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]) labels = [label_map[p.item()] for p in predictions] assert len(tokens) == len(predictions) # Print results for token, label in zip(tokens, labels): if token not in tokenizer.all_special_tokens: print(f"{token:15} → {label}") if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/token_classify/ner.py
examples/pooling/token_classify/ner.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project # Adapted from https://huggingface.co/boltuix/NeuroBERT-NER from argparse import Namespace from vllm import LLM, EngineArgs from vllm.utils.argparse_utils import FlexibleArgumentParser def parse_args(): parser = FlexibleArgumentParser() parser = EngineArgs.add_cli_args(parser) # Set example specific arguments parser.set_defaults( model="boltuix/NeuroBERT-NER", runner="pooling", enforce_eager=True, trust_remote_code=True, ) return parser.parse_args() def main(args: Namespace): # Sample prompts. prompts = [ "Barack Obama visited Microsoft headquarters in Seattle on January 2025." ] # Create an LLM. llm = LLM(**vars(args)) tokenizer = llm.get_tokenizer() label_map = llm.llm_engine.vllm_config.model_config.hf_config.id2label # Run inference outputs = llm.encode(prompts, pooling_task="token_classify") for prompt, output in zip(prompts, outputs): logits = output.outputs.data predictions = logits.argmax(dim=-1) # Map predictions to labels tokens = tokenizer.convert_ids_to_tokens(output.prompt_token_ids) labels = [label_map[p.item()] for p in predictions] # Print results for token, label in zip(tokens, labels): if token not in tokenizer.all_special_tokens: print(f"{token:15} → {label}") if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/pooling/classify/openai_classification_client.py
examples/pooling/classify/openai_classification_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Example Python client for classification API using vLLM API server NOTE: start a supported classification model server with `vllm serve`, e.g. vllm serve jason9693/Qwen2.5-1.5B-apeach """ import argparse import pprint import requests def post_http_request(payload: dict, api_url: str) -> requests.Response: headers = {"User-Agent": "Test Client"} response = requests.post(api_url, headers=headers, json=payload) return response def parse_args(): parse = argparse.ArgumentParser() parse.add_argument("--host", type=str, default="localhost") parse.add_argument("--port", type=int, default=8000) parse.add_argument("--model", type=str, default="jason9693/Qwen2.5-1.5B-apeach") return parse.parse_args() def main(args): host = args.host port = args.port model_name = args.model api_url = f"http://{host}:{port}/classify" prompts = [ "Hello, my name is", "The president of the United States is", "The capital of France is", "The future of AI is", ] payload = { "model": model_name, "input": prompts, } classify_response = post_http_request(payload=payload, api_url=api_url) pprint.pprint(classify_response.json()) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/retrieval_augmented_generation_with_llamaindex.py
examples/online_serving/retrieval_augmented_generation_with_llamaindex.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ RAG (Retrieval Augmented Generation) Implementation with LlamaIndex ================================================================ This script demonstrates a RAG system using: - LlamaIndex: For document indexing and retrieval - Milvus: As vector store backend - vLLM: For embedding and text generation Features: 1. Document Loading & Processing 2. Embedding & Storage 3. Query Processing Requirements: 1. Install dependencies: pip install llama-index llama-index-readers-web \ llama-index-llms-openai-like \ llama-index-embeddings-openai-like \ llama-index-vector-stores-milvus \ 2. Start services: # Start embedding service (port 8000) vllm serve ssmits/Qwen2-7B-Instruct-embed-base # Start chat service (port 8001) vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001 Usage: python retrieval_augmented_generation_with_llamaindex.py Notes: - Ensure both vLLM services are running before executing - Default ports: 8000 (embedding), 8001 (chat) - First run may take time to download models """ import argparse from argparse import Namespace from typing import Any from llama_index.core import Settings, StorageContext, VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.embeddings.openai_like import OpenAILikeEmbedding from llama_index.llms.openai_like import OpenAILike from llama_index.readers.web import SimpleWebPageReader from llama_index.vector_stores.milvus import MilvusVectorStore def init_config(args: Namespace): """Initialize configuration with command line arguments""" return { "url": args.url, "embedding_model": args.embedding_model, "chat_model": args.chat_model, "vllm_api_key": args.vllm_api_key, "embedding_endpoint": args.embedding_endpoint, "chat_endpoint": args.chat_endpoint, "db_path": args.db_path, "chunk_size": args.chunk_size, "chunk_overlap": args.chunk_overlap, "top_k": args.top_k, } def load_documents(url: str) -> 
list: """Load and process web documents""" return SimpleWebPageReader(html_to_text=True).load_data([url]) def setup_models(config: dict[str, Any]): """Configure embedding and chat models""" Settings.embed_model = OpenAILikeEmbedding( api_base=config["embedding_endpoint"], api_key=config["vllm_api_key"], model_name=config["embedding_model"], ) Settings.llm = OpenAILike( model=config["chat_model"], api_key=config["vllm_api_key"], api_base=config["chat_endpoint"], context_window=128000, is_chat_model=True, is_function_calling_model=False, ) Settings.transformations = [ SentenceSplitter( chunk_size=config["chunk_size"], chunk_overlap=config["chunk_overlap"], ) ] def setup_vector_store(db_path: str) -> MilvusVectorStore: """Initialize vector store""" sample_emb = Settings.embed_model.get_text_embedding("test") print(f"Embedding dimension: {len(sample_emb)}") return MilvusVectorStore(uri=db_path, dim=len(sample_emb), overwrite=True) def create_index(documents: list, vector_store: MilvusVectorStore): """Create document index""" storage_context = StorageContext.from_defaults(vector_store=vector_store) return VectorStoreIndex.from_documents( documents, storage_context=storage_context, ) def query_document(index: VectorStoreIndex, question: str, top_k: int): """Query document with given question""" query_engine = index.as_query_engine(similarity_top_k=top_k) return query_engine.query(question) def get_parser() -> argparse.ArgumentParser: """Parse command line arguments""" parser = argparse.ArgumentParser(description="RAG with vLLM and LlamaIndex") # Add command line arguments parser.add_argument( "--url", default=("https://docs.vllm.ai/en/latest/getting_started/quickstart.html"), help="URL of the document to process", ) parser.add_argument( "--embedding-model", default="ssmits/Qwen2-7B-Instruct-embed-base", help="Model name for embeddings", ) parser.add_argument( "--chat-model", default="qwen/Qwen1.5-0.5B-Chat", help="Model name for chat" ) parser.add_argument( 
"--vllm-api-key", default="EMPTY", help="API key for vLLM compatible services" ) parser.add_argument( "--embedding-endpoint", default="http://localhost:8000/v1", help="Base URL for embedding service", ) parser.add_argument( "--chat-endpoint", default="http://localhost:8001/v1", help="Base URL for chat service", ) parser.add_argument( "--db-path", default="./milvus_demo.db", help="Path to Milvus database" ) parser.add_argument( "-i", "--interactive", action="store_true", help="Enable interactive Q&A mode" ) parser.add_argument( "-c", "--chunk-size", type=int, default=1000, help="Chunk size for document splitting", ) parser.add_argument( "-o", "--chunk-overlap", type=int, default=200, help="Chunk overlap for document splitting", ) parser.add_argument( "-k", "--top-k", type=int, default=3, help="Number of top results to retrieve" ) return parser def main(): # Parse command line arguments args = get_parser().parse_args() # Initialize configuration config = init_config(args) # Load documents documents = load_documents(config["url"]) # Setup models setup_models(config) # Setup vector store vector_store = setup_vector_store(config["db_path"]) # Create index index = create_index(documents, vector_store) if args.interactive: print("\nEntering interactive mode. Type 'quit' to exit.") while True: # Get user question question = input("\nEnter your question: ") # Check for exit command if question.lower() in ["quit", "exit", "q"]: print("Exiting interactive mode...") break # Get and print response print("\n" + "-" * 50) print("Response:\n") response = query_document(index, question, config["top_k"]) print(response) print("-" * 50) else: # Single query mode question = "How to install vLLM?" response = query_document(index, question, config["top_k"]) print("-" * 50) print("Response:\n") print(response) print("-" * 50) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_transcription_client.py
examples/online_serving/openai_transcription_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This script demonstrates how to use the vLLM API server to perform audio transcription with the `openai/whisper-large-v3` model. Before running this script, you must start the vLLM server with the following command: vllm serve openai/whisper-large-v3 Requirements: - vLLM with audio support - openai Python SDK - httpx for streaming support The script performs: 1. Synchronous transcription using OpenAI-compatible API. 2. Streaming transcription using raw HTTP request to the vLLM server. """ import argparse import asyncio from openai import AsyncOpenAI, OpenAI from vllm.assets.audio import AudioAsset def sync_openai(audio_path: str, client: OpenAI, model: str): """ Perform synchronous transcription using OpenAI-compatible API. """ with open(audio_path, "rb") as f: transcription = client.audio.transcriptions.create( file=f, model=model, language="en", response_format="json", temperature=0.0, # Additional sampling params not provided by OpenAI API. extra_body=dict( seed=4419, repetition_penalty=1.3, ), ) print("transcription result [sync]:", transcription.text) async def stream_openai_response(audio_path: str, client: AsyncOpenAI, model: str): """ Perform asynchronous transcription using OpenAI-compatible API. """ print("\ntranscription result [stream]:", end=" ") with open(audio_path, "rb") as f: transcription = await client.audio.transcriptions.create( file=f, model=model, language="en", response_format="json", temperature=0.0, # Additional sampling params not provided by OpenAI API. 
extra_body=dict( seed=420, top_p=0.6, ), stream=True, ) async for chunk in transcription: if chunk.choices: content = chunk.choices[0].get("delta", {}).get("content") print(content, end="", flush=True) print() # Final newline after stream ends def stream_api_response(audio_path: str, model: str, openai_api_base: str): """ Perform streaming transcription using raw HTTP requests to the vLLM API server. """ import json import os import requests api_url = f"{openai_api_base}/audio/transcriptions" headers = {"User-Agent": "Transcription-Client"} with open(audio_path, "rb") as f: files = {"file": (os.path.basename(audio_path), f)} data = { "stream": "true", "model": model, "language": "en", "response_format": "json", } print("\ntranscription result [stream]:", end=" ") response = requests.post( api_url, headers=headers, files=files, data=data, stream=True ) for chunk in response.iter_lines( chunk_size=8192, decode_unicode=False, delimiter=b"\n" ): if chunk: data = chunk[len("data: ") :] data = json.loads(data.decode("utf-8")) data = data["choices"][0] delta = data["delta"]["content"] print(delta, end="", flush=True) finish_reason = data.get("finish_reason") if finish_reason is not None: print(f"\n[Stream finished reason: {finish_reason}]") break def main(args): mary_had_lamb = str(AudioAsset("mary_had_lamb").get_local_path()) winning_call = str(AudioAsset("winning_call").get_local_path()) # Modify OpenAI's API key and API base to use vLLM's API server. 
openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" client = OpenAI( api_key=openai_api_key, base_url=openai_api_base, ) model = client.models.list().data[0].id print(f"Using model: {model}") # Run the synchronous function sync_openai(args.audio_path if args.audio_path else mary_had_lamb, client, model) # Run the asynchronous function if "openai" in model: client = AsyncOpenAI( api_key=openai_api_key, base_url=openai_api_base, ) asyncio.run( stream_openai_response( args.audio_path if args.audio_path else winning_call, client, model ) ) else: stream_api_response( args.audio_path if args.audio_path else winning_call, model, openai_api_base, ) if __name__ == "__main__": # setup argparser parser = argparse.ArgumentParser( description="OpenAI Transcription Client using vLLM API Server" ) parser.add_argument( "--audio_path", type=str, default=None, help="The path to the audio file to transcribe.", ) args = parser.parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_responses_client_with_tools.py
examples/online_serving/openai_responses_client_with_tools.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Set up this example by starting a vLLM OpenAI-compatible server with tool call options enabled. Reasoning models can be used through the Responses API as seen here https://platform.openai.com/docs/api-reference/responses For example: vllm serve Qwen/Qwen3-1.7B --reasoning-parser qwen3 \ --structured-outputs-config.backend xgrammar \ --enable-auto-tool-choice --tool-call-parser hermes """ import json from openai import OpenAI from utils import get_first_model def get_weather(latitude: float, longitude: float) -> str: """ Mock function to simulate getting weather data. In a real application, this would call an external weather API. """ return f"Current temperature at ({latitude}, {longitude}) is 20°C." tools = [ { "type": "function", "name": "get_weather", "description": "Get current temperature for provided coordinates in celsius.", "parameters": { "type": "object", "properties": { "latitude": {"type": "number"}, "longitude": {"type": "number"}, }, "required": ["latitude", "longitude"], "additionalProperties": False, }, "strict": True, } ] input_messages = [ {"role": "user", "content": "What's the weather like in Paris today?"} ] def main(): base_url = "http://0.0.0.0:8000/v1" client = OpenAI(base_url=base_url, api_key="empty") model = get_first_model(client) response = client.responses.create( model=model, input=input_messages, tools=tools, tool_choice="required" ) for out in response.output: if out.type == "function_call": print("Function call:", out.name, out.arguments) tool_call = out args = json.loads(tool_call.arguments) result = get_weather(args["latitude"], args["longitude"]) input_messages.append(tool_call) # append model's function call message input_messages.append( { # append result message "type": "function_call_output", "call_id": tool_call.call_id, "output": str(result), } ) response_2 = client.responses.create( model=model, 
input=input_messages, tools=tools, ) print(response_2.output_text) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/streamlit_openai_chatbot_webserver.py
examples/online_serving/streamlit_openai_chatbot_webserver.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
vLLM Chat Assistant - A Streamlit Web Interface

A streamlined chat interface that quickly integrates with vLLM API server.

Features:
- Multiple chat sessions management
- Streaming response display
- Configurable API endpoint
- Real-time chat history
- Reasoning Display: Optional thinking process visualization

Requirements:
    pip install streamlit openai

Usage:
    # Start the app with default settings
    streamlit run streamlit_openai_chatbot_webserver.py

    # Start with custom vLLM API endpoint
    VLLM_API_BASE="http://your-server:8000/v1" \
        streamlit run streamlit_openai_chatbot_webserver.py

    # Enable debug mode
    streamlit run streamlit_openai_chatbot_webserver.py \
        --logger.level=debug
"""

import os
from datetime import datetime

import streamlit as st
from openai import OpenAI

# Get command line arguments from environment variables
openai_api_key = os.getenv("VLLM_API_KEY", "EMPTY")
openai_api_base = os.getenv("VLLM_API_BASE", "http://localhost:8000/v1")

# Initialize session states for managing chat sessions.
# Streamlit re-runs this script top-to-bottom on every interaction, so all
# persistent state must live in st.session_state.
if "sessions" not in st.session_state:
    st.session_state.sessions = {}

if "current_session" not in st.session_state:
    st.session_state.current_session = None

if "messages" not in st.session_state:
    st.session_state.messages = []

if "active_session" not in st.session_state:
    st.session_state.active_session = None

# Add new session state for reasoning
# Maps assistant-message index -> captured reasoning text for that message.
if "show_reasoning" not in st.session_state:
    st.session_state.show_reasoning = {}

# Initialize session state for API base URL
if "api_base_url" not in st.session_state:
    st.session_state.api_base_url = openai_api_base


def create_new_chat_session() -> None:
    """Create a new chat session with timestamp as unique identifier.

    This function initializes a new chat session by:
    1. Generating a timestamp-based session ID
    2. Creating an empty message list for the new session
    3. Setting the new session as both current and active session
    4. Resetting the messages list for the new session

    Returns:
        None

    Session State Updates:
        - sessions: Adds new empty message list with timestamp key
        - current_session: Sets to new session ID
        - active_session: Sets to new session ID
        - messages: Resets to empty list
    """
    session_id = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    st.session_state.sessions[session_id] = []
    st.session_state.current_session = session_id
    st.session_state.active_session = session_id
    st.session_state.messages = []


def switch_to_chat_session(session_id) -> None:
    """Switch the active chat context to a different session.

    Args:
        session_id (str): The timestamp ID of the session to switch to

    This function handles chat session switching by:
    1. Setting the specified session as current
    2. Updating the active session marker
    3. Loading the messages history from the specified session

    Session State Updates:
        - current_session: Updated to specified session_id
        - active_session: Updated to specified session_id
        - messages: Loaded from sessions[session_id]
    """
    st.session_state.current_session = session_id
    st.session_state.active_session = session_id
    st.session_state.messages = st.session_state.sessions[session_id]


def get_llm_response(messages, model, reason, content_ph=None, reasoning_ph=None):
    """Generate and stream LLM response with optional reasoning process.

    Args:
        messages (list): List of conversation message dicts with 'role' and
            'content'
        model (str): The model identifier to use for generation
        reason (bool): Whether to enable and display reasoning process
        content_ph (streamlit.empty): Placeholder for streaming response content
        reasoning_ph (streamlit.empty): Placeholder for streaming reasoning
            process

    Returns:
        tuple: (str, str) - (complete response text, complete reasoning text;
            reasoning is empty when disabled)

    Note:
        The function uses streamlit placeholders for live updates. When
        reason=True, the reasoning process appears above the response.
        Errors from the API call are rendered via st.error and returned as an
        "Error: ..." string rather than raised.
    """
    full_text = ""
    think_text = ""
    live_think = None
    # Build request parameters
    params = {"model": model, "messages": messages, "stream": True}
    if reason:
        # extra_body is passed straight through to vLLM's chat template.
        params["extra_body"] = {"chat_template_kwargs": {"enable_thinking": True}}

    try:
        response = client.chat.completions.create(**params)

        # Defensive: a plain-string response is displayed as-is (no streaming).
        if isinstance(response, str):
            if content_ph:
                content_ph.markdown(response)
            return response, ""

        # Prepare reasoning expander above content
        if reason and reasoning_ph:
            exp = reasoning_ph.expander("💭 Thinking Process (live)", expanded=True)
            live_think = exp.empty()

        # Stream chunks
        for chunk in response:
            delta = chunk.choices[0].delta

            # Stream reasoning first
            # NOTE(review): this reads `delta.reasoning`; some vLLM deployments
            # expose the field as `reasoning_content` instead — confirm against
            # the target server version.
            if reason and hasattr(delta, "reasoning") and live_think:
                rc = delta.reasoning
                if rc:
                    think_text += rc
                    live_think.markdown(think_text + "▌")

            # Then stream content
            if hasattr(delta, "content") and delta.content and content_ph:
                full_text += delta.content
                content_ph.markdown(full_text + "▌")

        # Finalize displays: reasoning remains above, content below
        if reason and live_think:
            live_think.markdown(think_text)
        if content_ph:
            content_ph.markdown(full_text)

        return full_text, think_text
    except Exception as e:
        st.error(f"Error details: {str(e)}")
        return f"Error: {str(e)}", ""


# Sidebar - API Settings first
st.sidebar.title("API Settings")
new_api_base = st.sidebar.text_input(
    "API Base URL:", value=st.session_state.api_base_url
)
if new_api_base != st.session_state.api_base_url:
    st.session_state.api_base_url = new_api_base
    # Force a rerun so the client below is rebuilt with the new base URL.
    st.rerun()

st.sidebar.divider()

# Sidebar - Session Management
st.sidebar.title("Chat Sessions")
if st.sidebar.button("New Session"):
    create_new_chat_session()

# Display all sessions in reverse chronological order
for session_id in sorted(st.session_state.sessions.keys(), reverse=True):
    # Mark the active session with a pinned button
    if session_id == st.session_state.active_session:
        st.sidebar.button(
            f"📍 {session_id}",
            key=session_id,
            type="primary",
            on_click=switch_to_chat_session,
            args=(session_id,),
        )
    else:
        st.sidebar.button(
            f"Session {session_id}",
            key=session_id,
            on_click=switch_to_chat_session,
            args=(session_id,),
        )

# Main interface
st.title("vLLM Chat Assistant")

# Initialize OpenAI client with API settings
# (module-level global; get_llm_response and server_supports_reasoning use it)
client = OpenAI(api_key=openai_api_key, base_url=st.session_state.api_base_url)

# Get and display current model id
models = client.models.list()
model = models.data[0].id
st.markdown(f"**Model**: {model}")

# Initialize first session if none exists
if st.session_state.current_session is None:
    create_new_chat_session()
    st.session_state.active_session = st.session_state.current_session

# Update the chat history display section
for idx, msg in enumerate(st.session_state.messages):
    # Render user messages normally
    if msg["role"] == "user":
        with st.chat_message("user"):
            st.write(msg["content"])
    # Render assistant messages with reasoning above
    else:
        # If reasoning exists for this assistant message, show it above the content
        if idx in st.session_state.show_reasoning:
            with st.expander("💭 Thinking Process", expanded=False):
                st.markdown(st.session_state.show_reasoning[idx])
        with st.chat_message("assistant"):
            st.write(msg["content"])


# Setup & Cache reasoning support check
@st.cache_data(show_spinner=False)
def server_supports_reasoning() -> bool:
    """Check if the current model supports reasoning capability.

    Sends one tiny non-streaming request and inspects the reply message.

    Returns:
        bool: True if the model supports reasoning, False otherwise
    """
    resp = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Hi"}],
        stream=False,
    )
    return hasattr(resp.choices[0].message, "reasoning") and bool(
        resp.choices[0].message.reasoning
    )


# Check support
supports_reasoning = server_supports_reasoning()

# Add reasoning toggle in sidebar if supported
reason = False  # Default to False
if supports_reasoning:
    reason = st.sidebar.checkbox("Enable Reasoning", value=False)
else:
    st.sidebar.markdown(
        "<span style='color:gray;'>Reasoning unavailable for this model.</span>",
        unsafe_allow_html=True,
    )
    # reason remains False

# Update the input handling section
if prompt := st.chat_input("Type your message here..."):
    # Save and display user message
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.session_state.sessions[st.session_state.current_session] = (
        st.session_state.messages
    )
    with st.chat_message("user"):
        st.write(prompt)

    # Prepare LLM messages (strip any extra keys; send only role/content)
    msgs = [
        {"role": m["role"], "content": m["content"]} for m in st.session_state.messages
    ]

    # Stream assistant response
    with st.chat_message("assistant"):
        # Placeholders: reasoning above, content below
        reason_ph = st.empty()
        content_ph = st.empty()
        full, think = get_llm_response(msgs, model, reason, content_ph, reason_ph)
        # Determine index for this new assistant message
        message_index = len(st.session_state.messages)
        # Save assistant reply
        st.session_state.messages.append({"role": "assistant", "content": full})
        # Persist reasoning in session state if any
        if reason and think:
            st.session_state.show_reasoning[message_index] = think
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_client_with_tools_xlam_streaming.py
examples/online_serving/openai_chat_completion_client_with_tools_xlam_streaming.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
Set up this example by starting a vLLM OpenAI-compatible server with tool call
options enabled for xLAM-2 models:

vllm serve --model Salesforce/Llama-xLAM-2-8b-fc-r --enable-auto-tool-choice --tool-call-parser xlam

OR

vllm serve --model Salesforce/xLAM-2-3b-fc-r --enable-auto-tool-choice --tool-call-parser xlam

This example demonstrates streaming tool calls with xLAM models.
"""

import json
import time

from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "empty"
openai_api_base = "http://localhost:8000/v1"


# Define tool functions
def get_weather(location: str, unit: str):
    """Demo stub: return a canned weather string."""
    return f"Weather in {location} is 22 degrees {unit}."


def calculate_expression(expression: str):
    """Demo stub: evaluate a Python expression and describe the result.

    NOTE: eval() on model-produced text is unsafe outside a controlled demo;
    do not copy this into production code.
    """
    try:
        result = eval(expression)
        return f"The result of {expression} is {result}"
    except Exception as e:
        return f"Could not calculate {expression}: {e}"


def translate_text(text: str, target_language: str):
    """Demo stub: pretend to translate *text*."""
    return f"Translation of '{text}' to {target_language}: [translated content]"


# Define tools
# OpenAI function-calling schema describing the three stubs above.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City and state, e.g., 'San Francisco, CA'",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location", "unit"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "calculate_expression",
            "description": "Calculate a mathematical expression",
            "parameters": {
                "type": "object",
                "properties": {
                    "expression": {
                        "type": "string",
                        "description": "Mathematical expression to evaluate, needs to be a valid Python expression",
                    }
                },
                "required": ["expression"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "translate_text",
            "description": "Translate text to another language",
            "parameters": {
                "type": "object",
                "properties": {
                    "text": {"type": "string", "description": "Text to translate"},
                    "target_language": {
                        "type": "string",
                        "description": "Target language for translation",
                    },
                },
                "required": ["text", "target_language"],
            },
        },
    },
]

# Map of function names to implementations
tool_functions = {
    "get_weather": get_weather,
    "calculate_expression": calculate_expression,
    "translate_text": translate_text,
}


def process_stream(response, tool_functions, original_query):
    """Process a streaming response with possible tool calls

    Accumulates tool-call fragments keyed by call id, executes each completed
    call locally, then (if any calls ran) sends one streaming follow-up request
    containing the tool results. Uses the module-level `client` set in main().
    """
    # Track multiple tool calls
    tool_calls = {}  # Dictionary to store tool calls by ID
    current_id = None

    print("\n--- Stream Output ---")

    for chunk in response:
        # Handle tool calls in the stream
        if chunk.choices[0].delta.tool_calls:
            for tool_call_chunk in chunk.choices[0].delta.tool_calls:
                # Get the tool call ID
                # NOTE(review): chunks arriving before any id has been seen are
                # dropped — assumes the server always sends the id in the first
                # fragment of each call; confirm for the target parser.
                if hasattr(tool_call_chunk, "id") and tool_call_chunk.id:
                    current_id = tool_call_chunk.id
                    if current_id not in tool_calls:
                        tool_calls[current_id] = {
                            "function_name": None,
                            "function_args": "",
                            "function_id": current_id,
                        }

                # Extract function information as it comes in chunks
                if (
                    hasattr(tool_call_chunk, "function")
                    and current_id
                    and current_id in tool_calls
                ):
                    if (
                        hasattr(tool_call_chunk.function, "name")
                        and tool_call_chunk.function.name
                    ):
                        tool_calls[current_id]["function_name"] = (
                            tool_call_chunk.function.name
                        )
                        print(f"Function called: {tool_call_chunk.function.name}")

                    if (
                        hasattr(tool_call_chunk.function, "arguments")
                        and tool_call_chunk.function.arguments
                    ):
                        tool_calls[current_id]["function_args"] += (
                            tool_call_chunk.function.arguments
                        )
                        print(f"Arguments chunk: {tool_call_chunk.function.arguments}")

        # Handle regular content in the stream
        elif chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")

    print("\n--- End Stream ---\n")

    # Execute each function call and build messages for follow-up
    follow_up_messages = [{"role": "user", "content": original_query}]

    for tool_id, tool_data in tool_calls.items():
        function_name = tool_data["function_name"]
        function_args = tool_data["function_args"]
        function_id = tool_data["function_id"]

        if function_name and function_args:
            try:
                # Parse the JSON arguments
                args = json.loads(function_args)

                # Call the function with the arguments
                function_result = tool_functions[function_name](**args)
                print(
                    f"\n--- Function Result ({function_name}) ---\n{function_result}\n"
                )

                # Add the assistant message with tool call
                follow_up_messages.append(
                    {
                        "role": "assistant",
                        "tool_calls": [
                            {
                                "id": function_id,
                                "type": "function",
                                "function": {
                                    "name": function_name,
                                    "arguments": function_args,
                                },
                            }
                        ],
                    }
                )

                # Add the tool message with function result
                follow_up_messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": function_id,
                        "content": function_result,
                    }
                )
            except Exception as e:
                # Best-effort demo: report and continue with remaining calls.
                print(f"Error executing function: {e}")

    # Only send follow-up if we have results to process
    if len(follow_up_messages) > 1:
        # Create a follow-up message with all the function results
        follow_up_response = client.chat.completions.create(
            model=client.models.list().data[0].id,
            messages=follow_up_messages,
            stream=True,
        )

        print("\n--- Follow-up Response ---")
        for chunk in follow_up_response:
            if chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="")
        print("\n--- End Follow-up ---\n")


def run_test_case(query, test_name):
    """Run a single test case with the given query"""
    print(f"\n{'=' * 50}\nTEST CASE: {test_name}\n{'=' * 50}")
    print(f"Query: '{query}'")

    start_time = time.time()

    # Create streaming chat completion request
    response = client.chat.completions.create(
        model=client.models.list().data[0].id,
        messages=[{"role": "user", "content": query}],
        tools=tools,
        tool_choice="auto",
        stream=True,
    )

    # Process the streaming response
    process_stream(response, tool_functions, query)

    end_time = time.time()
    print(f"Test completed in {end_time - start_time:.2f} seconds")


def main():
    # Initialize OpenAI client
    # `client` is intentionally a module-level global so the helpers above can
    # reuse the same connection.
    global client
    client = OpenAI(
        api_key=openai_api_key,
        base_url=openai_api_base,
    )

    # Run test cases
    test_cases = [
        ("I want to know the weather in San Francisco", "Weather Information"),
        ("Calculate 25 * 17 + 31", "Math Calculation"),
        ("Translate 'Hello world' to Spanish", "Text Translation"),
        ("What is the weather in Tokyo and New York in celsius", "Multiple Tool Usage"),
    ]

    # Execute all test cases
    for query, test_name in test_cases:
        run_test_case(query, test_name)
        time.sleep(1)  # Small delay between tests

    print("\nAll tests completed.")


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_client_with_tools_xlam.py
examples/online_serving/openai_chat_completion_client_with_tools_xlam.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
Set up this example by starting a vLLM OpenAI-compatible server with tool call
options enabled for xLAM-2 models:

vllm serve --model Salesforce/Llama-xLAM-2-8b-fc-r --enable-auto-tool-choice --tool-call-parser xlam

OR

vllm serve --model Salesforce/xLAM-2-3b-fc-r --enable-auto-tool-choice --tool-call-parser xlam
"""

import json
import time

from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "empty"
openai_api_base = "http://localhost:8000/v1"


# Define tool functions
def get_weather(location: str, unit: str):
    """Demo stub: return a canned weather string."""
    return f"Weather in {location} is 22 degrees {unit}."


def calculate_expression(expression: str):
    """Demo stub: evaluate a Python expression and describe the result.

    NOTE: eval() on model-produced text is unsafe outside a controlled demo;
    do not copy this into production code.
    """
    try:
        result = eval(expression)
        return f"The result of {expression} is {result}"
    except Exception as e:
        return f"Could not calculate {expression}: {e}"


def translate_text(text: str, target_language: str):
    """Demo stub: pretend to translate *text*."""
    return f"Translation of '{text}' to {target_language}: [translated content]"


# Define tools
# OpenAI function-calling schema describing the three stubs above.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City and state, e.g., 'San Francisco, CA'",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location", "unit"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "calculate_expression",
            "description": "Calculate a mathematical expression",
            "parameters": {
                "type": "object",
                "properties": {
                    "expression": {
                        "type": "string",
                        "description": "Mathematical expression to evaluate, needs to be a valid python expression",
                    }
                },
                "required": ["expression"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "translate_text",
            "description": "Translate text to another language",
            "parameters": {
                "type": "object",
                "properties": {
                    "text": {"type": "string", "description": "Text to translate"},
                    "target_language": {
                        "type": "string",
                        "description": "Target language for translation",
                    },
                },
                "required": ["text", "target_language"],
            },
        },
    },
]

# Map of function names to implementations
tool_functions = {
    "get_weather": get_weather,
    "calculate_expression": calculate_expression,
    "translate_text": translate_text,
}


def process_response(response, tool_functions, original_query):
    """Process a non-streaming response with possible tool calls

    Executes every tool call in the response locally, then sends a single
    follow-up request containing the assistant tool-call message plus one
    tool-result message per call. Uses the module-level `client` set in main().
    """
    print("\n--- Response Output ---")

    # Check if the response has content
    if response.choices[0].message.content:
        print(f"Content: {response.choices[0].message.content}")

    # Check if the response has tool calls
    if response.choices[0].message.tool_calls:
        print("--------------------------------")
        print(f"Tool calls: {response.choices[0].message.tool_calls}")
        print("--------------------------------")

        # Collect all tool calls and results before making follow-up request
        tool_results = []
        assistant_message = {"role": "assistant"}
        if response.choices[0].message.content:
            assistant_message["content"] = response.choices[0].message.content
        assistant_tool_calls = []

        # Process each tool call
        for tool_call in response.choices[0].message.tool_calls:
            function_name = tool_call.function.name
            function_args = tool_call.function.arguments
            function_id = tool_call.id

            print(f"Function called: {function_name}")
            print(f"Arguments: {function_args}")
            print(f"Function ID: {function_id}")

            # Execute the function
            try:
                # Parse the JSON arguments
                args = json.loads(function_args)

                # Call the function with the arguments
                function_result = tool_functions[function_name](**args)
                print(f"\n--- Function Result ---\n{function_result}\n")

                # Add tool call to assistant message
                assistant_tool_calls.append(
                    {
                        "id": function_id,
                        "type": "function",
                        "function": {"name": function_name, "arguments": function_args},
                    }
                )

                # Add tool result to tool_results
                tool_results.append(
                    {
                        "role": "tool",
                        "tool_call_id": function_id,
                        "content": function_result,
                    }
                )
            except Exception as e:
                # Best-effort demo: report and continue with remaining calls.
                print(f"Error executing function: {e}")

        # Add tool_calls to assistant message
        assistant_message["tool_calls"] = assistant_tool_calls

        # Create a follow-up message with all function results
        follow_up_messages = [
            {"role": "user", "content": original_query},
            assistant_message,
        ]

        # Add all tool results to the messages
        follow_up_messages.extend(tool_results)

        # Get completion with all tool results in a single follow-up
        follow_up_response = client.chat.completions.create(
            model=client.models.list().data[0].id,
            messages=follow_up_messages,
            stream=False,
        )

        print("\n--- Follow-up Response ---")
        print(follow_up_response.choices[0].message.content)
        print("--- End Follow-up ---\n")

    print("--- End Response ---\n")


def run_test_case(query, test_name):
    """Run a single test case with the given query"""
    print(f"\n{'=' * 50}\nTEST CASE: {test_name}\n{'=' * 50}")
    print(f"Query: '{query}'")

    start_time = time.time()

    # Create non-streaming chat completion request
    response = client.chat.completions.create(
        model=client.models.list().data[0].id,
        messages=[{"role": "user", "content": query}],
        tools=tools,
        tool_choice="auto",
        stream=False,
    )

    # Process the non-streaming response, passing the original query
    process_response(response, tool_functions, query)

    end_time = time.time()
    print(f"Test completed in {end_time - start_time:.2f} seconds")


def main():
    # Initialize OpenAI client
    # `client` is intentionally a module-level global so the helpers above can
    # reuse the same connection.
    global client
    client = OpenAI(
        api_key=openai_api_key,
        base_url=openai_api_base,
    )

    # Run test cases
    test_cases = [
        ("I want to know the weather in San Francisco", "Weather Information"),
        ("Calculate 25 * 17 + 31", "Math Calculation"),
        ("Translate 'Hello world' to Spanish", "Text Translation"),
        ("What is the weather in Tokyo and New York in celsius", "Multiple Tool Usage"),
    ]

    # Execute all test cases
    for query, test_name in test_cases:
        run_test_case(query, test_name)
        time.sleep(1)  # Small delay between tests

    print("\nAll tests completed.")


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_responses_client_with_mcp_tools.py
examples/online_serving/openai_responses_client_with_mcp_tools.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Example demonstrating MCP (Model Context Protocol) tools with the Responses API.

This example shows how to use MCP tools with different allowed_tools
configurations:
1. No filter (allows all tools from the MCP server)
2. Wildcard "*" (explicitly allows all tools)
3. Specific tool names (filters to only those tools)

Set up this example by starting a vLLM OpenAI-compatible server with MCP tools
enabled. For example:

vllm serve openai/gpt-oss-20b --enforce-eager --tool-server demo

Environment variables:
- VLLM_ENABLE_RESPONSES_API_STORE=1
- VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS=code_interpreter,container
- VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS=1
"""

from openai import OpenAI
from utils import get_first_model

# Every example talks to the same two endpoints; keep the literals in one
# place instead of repeating them in each function.
BASE_URL = "http://0.0.0.0:8000/v1"
MCP_SERVER_URL = "http://localhost:8888"


def _client_and_model():
    """Create a client for the local vLLM server and return (client, model id).

    Returns the first model the server reports, which is the model the server
    was launched with.
    """
    client = OpenAI(base_url=BASE_URL, api_key="empty")
    return client, get_first_model(client)


def _print_response(response):
    """Print the status and text output of a Responses API result."""
    print(f"Status: {response.status}")
    print(f"Output: {response.output_text}")
    print()


def example_no_filter():
    """Example with no allowed_tools filter - allows all tools."""
    print("=" * 60)
    print("Example 1: No allowed_tools filter (allows all tools)")
    print("=" * 60)

    client, model = _client_and_model()

    response = client.responses.create(
        model=model,
        input="Execute this code: print('Hello from Python!')",
        instructions="Use the Python tool to execute code.",
        tools=[
            {
                "type": "mcp",
                "server_label": "code_interpreter",
                "server_url": MCP_SERVER_URL,
                # No allowed_tools specified - all tools are available
            }
        ],
    )

    _print_response(response)


def example_wildcard():
    """Example with allowed_tools=['*'] - explicitly allows all tools."""
    print("=" * 60)
    print("Example 2: allowed_tools=['*'] (select all tools)")
    print("=" * 60)

    client, model = _client_and_model()

    response = client.responses.create(
        model=model,
        input="Execute this code: print('Hello from Python with wildcard!')",
        instructions="Use the Python tool to execute code.",
        tools=[
            {
                "type": "mcp",
                "server_label": "code_interpreter",
                "server_url": MCP_SERVER_URL,
                # Using "*" to explicitly allow all tools from this MCP server
                # This is equivalent to not specifying allowed_tools
                "allowed_tools": ["*"],
            }
        ],
    )

    _print_response(response)


def example_specific_tools():
    """Example with specific allowed_tools list - filters available tools.

    Note: This example uses 'web_search_preview' (browser) which has multiple
    sub-tools: 'search', 'open', 'find'. The code_interpreter (python) doesn't
    have sub-tools, so filtering doesn't apply there.
    """
    print("=" * 60)
    print("Example 3: allowed_tools=['search'] (filter browser to specific tools)")
    print("=" * 60)

    client, model = _client_and_model()

    response = client.responses.create(
        model=model,
        input="Search for 'Python programming tutorials'",
        instructions="Use the browser tool to search.",
        tools=[
            {
                "type": "mcp",
                "server_label": "web_search_preview",
                "server_url": MCP_SERVER_URL,
                # Browser has tools: 'search', 'open', 'find'
                # Only allow 'search' - blocks 'open' and 'find'
                "allowed_tools": ["search"],
            }
        ],
    )

    _print_response(response)


def example_object_format():
    """Example using object format for allowed_tools with browser tools."""
    print("=" * 60)
    print("Example 4: allowed_tools with object format")
    print("=" * 60)

    client, model = _client_and_model()

    response = client.responses.create(
        model=model,
        input="Search for 'machine learning' and open the first result",
        instructions="Use the browser tool.",
        tools=[
            {
                "type": "mcp",
                "server_label": "web_search_preview",
                "server_url": MCP_SERVER_URL,
                # Object format with tool_names field
                # Can also include read_only and other fields
                # Browser has tools: 'search', 'open', 'find'
                "allowed_tools": {
                    "tool_names": [
                        "search",
                        "open",
                    ],  # Allow search and open, block find
                    "read_only": False,
                },
            }
        ],
    )

    _print_response(response)


def main():
    """Run all examples."""
    print("\n" + "=" * 60)
    print("MCP Tools with allowed_tools Examples")
    print("=" * 60 + "\n")

    # Run all examples
    example_no_filter()
    example_wildcard()
    example_specific_tools()
    example_object_format()

    print("=" * 60)
    print("Summary:")
    print(" - No filter or '*' → All tools available from server")
    print(" - Specific list → Only those sub-tools available")
    print(" - Object format → More control with tool_names field")
    print("")
    print("Note: allowed_tools filters SUB-TOOLS within an MCP server:")
    print(" - code_interpreter (python): No sub-tools to filter")
    print(" - web_search_preview (browser): Has 'search', 'open', 'find'")
    print("=" * 60)


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_completion_client.py
examples/online_serving/openai_completion_client.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse

from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"


def parse_args():
    """Parse command-line options for this demo client."""
    cli = argparse.ArgumentParser(description="Client for vLLM API server")
    cli.add_argument(
        "--stream", action="store_true", help="Enable streaming response"
    )
    return cli.parse_args()


def main(args):
    """Ask the first served model for two completions and print them."""
    api_client = OpenAI(
        # defaults to os.environ.get("OPENAI_API_KEY")
        api_key=openai_api_key,
        base_url=openai_api_base,
    )

    # The server serves exactly what it was launched with; take the first model.
    served_model = api_client.models.list().data[0].id

    # Completion API
    result = api_client.completions.create(
        model=served_model,
        prompt="A robot may not injure a human being",
        echo=False,
        n=2,
        stream=args.stream,
        logprobs=3,
    )

    divider = "-" * 50
    print(divider)
    print("Completion results:")
    if not args.stream:
        print(result)
    else:
        for piece in result:
            print(piece)
    print(divider)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/gradio_webserver.py
examples/online_serving/gradio_webserver.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Example for starting a Gradio Webserver

Start vLLM API server:
    python -m vllm.entrypoints.api_server \
        --model meta-llama/Llama-2-7b-chat-hf

Start Webserver:
    python examples/online_serving/gradio_webserver.py

Note that `pip install --upgrade gradio` is needed to run this example.
More details: https://github.com/gradio-app/gradio

If your antivirus software blocks the download of frpc for gradio, you can
install it manually by following these steps:

1. Download this file:
   https://cdn-media.huggingface.co/frpc-gradio-0.3/frpc_linux_amd64
2. Rename the downloaded file to: frpc_linux_amd64_v0.3
3. Move the file to this location: /home/user/.cache/huggingface/gradio/frpc
"""

import argparse
import json

import gradio as gr
import requests


def http_bot(prompt):
    """Stream generated text for ``prompt`` from the vLLM API server."""
    payload = {
        "prompt": prompt,
        "stream": True,
        "max_tokens": 128,
    }
    reply = requests.post(
        args.model_url,
        headers={"User-Agent": "vLLM Client"},
        json=payload,
        stream=True,
    )

    # Each newline-delimited chunk is a JSON object holding the text so far.
    for raw in reply.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\n"):
        if not raw:
            continue
        parsed = json.loads(raw.decode("utf-8"))
        yield parsed["text"][0]


def build_demo():
    """Assemble the Gradio UI: an input box wired to an output box."""
    with gr.Blocks() as demo:
        gr.Markdown("# vLLM text completion demo\n")
        prompt_box = gr.Textbox(label="Input", placeholder="Enter text and press ENTER")
        result_box = gr.Textbox(
            label="Output", placeholder="Generated result from the model"
        )
        prompt_box.submit(http_bot, [prompt_box], [result_box])
    return demo


def parse_args():
    """Parse bind host/port for Gradio and the vLLM generate endpoint URL."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--host", type=str, default=None)
    cli.add_argument("--port", type=int, default=8001)
    cli.add_argument("--model-url", type=str, default="http://localhost:8000/generate")
    return cli.parse_args()


def main(args):
    """Build the demo and launch it with a public share link."""
    build_demo().queue().launch(
        server_name=args.host, server_port=args.port, share=True
    )


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/api_client.py
examples/online_serving/api_client.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Example Python client for `vllm.entrypoints.api_server`

Start the demo server:
    python -m vllm.entrypoints.api_server --model <model_name>

NOTE: The API server is used only for demonstration and simple performance
benchmarks. It is not intended for production use.
For production use, we recommend `vllm serve` and the OpenAI client API.
"""

import argparse
import json
from argparse import Namespace
from collections.abc import Iterable

import requests


def clear_line(n: int = 1) -> None:
    """Move the cursor up and erase the previous ``n`` terminal lines."""
    cursor_up = "\033[1A"
    erase_line = "\x1b[2K"
    for _ in range(n):
        print(cursor_up, end=erase_line, flush=True)


def post_http_request(
    prompt: str, api_url: str, n: int = 1, stream: bool = False
) -> requests.Response:
    """POST a generation request to the demo API server and return the response."""
    payload = {
        "prompt": prompt,
        "n": n,
        "temperature": 0.0,
        "max_tokens": 16,
        "stream": stream,
    }
    return requests.post(
        api_url,
        headers={"User-Agent": "Test Client"},
        json=payload,
        stream=stream,
    )


def get_streaming_response(response: requests.Response) -> Iterable[list[str]]:
    """Yield the beam texts carried by each newline-delimited JSON chunk."""
    for raw in response.iter_lines(
        chunk_size=8192, decode_unicode=False, delimiter=b"\n"
    ):
        if not raw:
            continue
        yield json.loads(raw.decode("utf-8"))["text"]


def get_response(response: requests.Response) -> list[str]:
    """Decode a non-streaming JSON response into the list of beam texts."""
    return json.loads(response.content)["text"]


def parse_args():
    """Parse server location, beam count, prompt text, and streaming toggle."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--host", type=str, default="localhost")
    cli.add_argument("--port", type=int, default=8000)
    cli.add_argument("--n", type=int, default=1)
    cli.add_argument("--prompt", type=str, default="San Francisco is a")
    cli.add_argument("--stream", action="store_true")
    return cli.parse_args()


def main(args: Namespace):
    """Send the prompt and print beam candidates, redrawing in-place if streaming."""
    prompt = args.prompt
    api_url = f"http://{args.host}:{args.port}/generate"

    print(f"Prompt: {prompt!r}\n", flush=True)
    response = post_http_request(prompt, api_url, args.n, args.stream)

    if args.stream:
        printed = 0
        for beams in get_streaming_response(response):
            # Overwrite the previously printed candidates with the new ones.
            clear_line(printed)
            printed = 0
            for i, line in enumerate(beams):
                printed += 1
                print(f"Beam candidate {i}: {line!r}", flush=True)
    else:
        for i, line in enumerate(get_response(response)):
            print(f"Beam candidate {i}: {line!r}", flush=True)


if __name__ == "__main__":
    args = parse_args()
    main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_client_with_tools_required.py
examples/online_serving/openai_chat_completion_client_with_tools_required.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
To run this example, start the vLLM server without any specific flags:

```bash
vllm serve unsloth/Llama-3.2-1B-Instruct \
    --structured-outputs-config.backend outlines
```

This example demonstrates how to generate chat completions
using the OpenAI Python client library.
"""

from openai import OpenAI

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

# Tool schemas advertised to the model; `tool_choice="required"` forces it
# to answer with at least one call to these.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "The city to find the weather for"
                        ", e.g. 'San Francisco'",
                    },
                    "state": {
                        "type": "string",
                        "description": (
                            "the two-letter abbreviation for the state that the "
                            "city is in, e.g. 'CA' which would mean 'California'"
                        ),
                    },
                    "unit": {
                        "type": "string",
                        "description": "The unit to fetch the temperature in",
                        "enum": ["celsius", "fahrenheit"],
                    },
                },
                "required": ["city", "state", "unit"],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_forecast",
            "description": "Get the weather forecast for a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": (
                            "The city to get the forecast for, e.g. 'New York'"
                        ),
                    },
                    "state": {
                        "type": "string",
                        "description": (
                            "The two-letter abbreviation for the state, e.g. 'NY'"
                        ),
                    },
                    "days": {
                        "type": "integer",
                        "description": "Number of days to get the forecast for (1-7)",
                    },
                    "unit": {
                        "type": "string",
                        "description": "The unit to fetch the temperature in",
                        "enum": ["celsius", "fahrenheit"],
                    },
                },
                "required": ["city", "state", "days", "unit"],
            },
        },
    },
]

messages = [
    {"role": "user", "content": "Hi! How are you doing today?"},
    {"role": "assistant", "content": "I'm doing well! How can I help you?"},
    {
        "role": "user",
        "content": "Can you tell me what the current weather is in Dallas \
    and the forecast for the next 5 days, in fahrenheit?",
    },
]


def main():
    """Demonstrate required tool calls, first streamed and then non-streamed."""
    client = OpenAI(
        # defaults to os.environ.get("OPENAI_API_KEY")
        api_key=openai_api_key,
        base_url=openai_api_base,
    )

    models = client.models.list()
    model = models.data[0].id

    # Streaming: print tool-call deltas as they arrive.
    chat_completion = client.chat.completions.create(
        messages=messages,
        model=model,
        tools=tools,
        tool_choice="required",
        stream=True,  # Enable streaming response
    )
    for chunk in chat_completion:
        if chunk.choices and chunk.choices[0].delta.tool_calls:
            print(chunk.choices[0].delta.tool_calls)

    # Non-streaming: the complete tool-call list comes back in one message.
    chat_completion = client.chat.completions.create(
        messages=messages, model=model, tools=tools, tool_choice="required"
    )
    print(chat_completion.choices[0].message.tool_calls)


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/ray_serve_deepseek.py
examples/online_serving/ray_serve_deepseek.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Deploy DeepSeek R1 or V3 with Ray Serve LLM. Ray Serve LLM is a scalable and production-grade model serving library built on the Ray distributed computing framework and first-class support for the vLLM engine. Key features: - Automatic scaling, back-pressure, and load balancing across a Ray cluster. - Unified multi-node multi-model deployment. - Exposes an OpenAI-compatible HTTP API. - Multi-LoRA support with shared base models. Run `python3 ray_serve_deepseek.py` to launch an endpoint. Learn more in the official Ray Serve LLM documentation: https://docs.ray.io/en/latest/serve/llm/serving-llms.html """ from ray import serve from ray.serve.llm import LLMConfig, build_openai_app llm_config = LLMConfig( model_loading_config={ "model_id": "deepseek", # Pre-downloading the model to local storage is recommended since # the model is large. Set model_source="/path/to/the/model". "model_source": "deepseek-ai/DeepSeek-R1", }, deployment_config={ "autoscaling_config": { "min_replicas": 1, "max_replicas": 1, } }, # Set to the node's accelerator type. accelerator_type="H100", # Customize engine arguments as required (for example, vLLM engine kwargs). engine_kwargs={ "tensor_parallel_size": 8, "pipeline_parallel_size": 2, "gpu_memory_utilization": 0.92, "dtype": "auto", "max_num_seqs": 40, "max_model_len": 16384, "enable_chunked_prefill": True, "enable_prefix_caching": True, "trust_remote_code": True, }, ) # Deploy the application. llm_app = build_openai_app({"llm_configs": [llm_config]}) serve.run(llm_app)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_responses_client.py
examples/online_serving/openai_responses_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Set up this example by starting a vLLM OpenAI-compatible server. Reasoning models can be used through the Responses API as seen here https://platform.openai.com/docs/api-reference/responses For example: vllm serve Qwen/Qwen3-8B --reasoning-parser qwen3 """ from openai import OpenAI input_messages = [{"role": "user", "content": "What model are you?"}] def main(): base_url = "http://localhost:8000/v1" client = OpenAI(base_url=base_url, api_key="empty") model = "Qwen/Qwen3-8B" # get_first_model(client) response = client.responses.create( model=model, input=input_messages, ) for message in response.output: if message.type == "reasoning": # append reasoning message input_messages.append(message) response_2 = client.responses.create( model=model, input=input_messages, ) print(response_2.output_text) # I am Qwen, a large language model developed by Alibaba Cloud. # I am designed to assist with a wide range of tasks, including # answering questions, creating content, coding, and engaging in # conversations. I can help with various topics and provide # information or support in multiple languages. How can I assist you today? if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_translation_client.py
examples/online_serving/openai_translation_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import asyncio import json import httpx from openai import OpenAI from vllm.assets.audio import AudioAsset def sync_openai(audio_path: str, client: OpenAI): with open(audio_path, "rb") as f: translation = client.audio.translations.create( file=f, model="openai/whisper-large-v3", response_format="json", temperature=0.0, # Additional params not provided by OpenAI API. extra_body=dict( language="it", seed=4419, repetition_penalty=1.3, ), ) print("translation result:", translation.text) async def stream_openai_response(audio_path: str, base_url: str, api_key: str): data = { "language": "it", "stream": True, "model": "openai/whisper-large-v3", } url = base_url + "/audio/translations" headers = {"Authorization": f"Bearer {api_key}"} print("translation result:", end=" ") # OpenAI translation API client does not support streaming. async with httpx.AsyncClient() as client: with open(audio_path, "rb") as f: async with client.stream( "POST", url, files={"file": f}, data=data, headers=headers ) as response: async for line in response.aiter_lines(): # Each line is a JSON object prefixed with 'data: ' if line: if line.startswith("data: "): line = line[len("data: ") :] # Last chunk, stream ends if line.strip() == "[DONE]": break # Parse the JSON response chunk = json.loads(line) # Extract and print the content content = chunk["choices"][0].get("delta", {}).get("content") print(content, end="") def main(): foscolo = str(AudioAsset("azacinto_foscolo").get_local_path()) # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" client = OpenAI( api_key=openai_api_key, base_url=openai_api_base, ) sync_openai(foscolo, client) # Run the asynchronous function asyncio.run(stream_openai_response(foscolo, openai_api_base, openai_api_key)) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/retrieval_augmented_generation_with_langchain.py
examples/online_serving/retrieval_augmented_generation_with_langchain.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Retrieval Augmented Generation (RAG) Implementation with Langchain
==================================================================

This script demonstrates a RAG implementation using LangChain, Milvus
and vLLM. RAG enhances LLM responses by retrieving relevant context
from a document collection.

Features:
- Web content loading and chunking
- Vector storage with Milvus
- Embedding generation with vLLM
- Question answering with context

Prerequisites:
1. Install dependencies:
    pip install -U vllm \
                langchain_milvus langchain_openai \
                langchain_community beautifulsoup4 \
                langchain-text-splitters

2. Start services:
    # Start embedding service (port 8000)
    vllm serve ssmits/Qwen2-7B-Instruct-embed-base

    # Start chat service (port 8001)
    vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001

Usage:
    python retrieval_augmented_generation_with_langchain.py

Notes:
    - Ensure both vLLM services are running before executing
    - Default ports: 8000 (embedding), 8001 (chat)
    - First run may take time to download models
"""

import argparse
from argparse import Namespace
from typing import Any

from langchain_community.document_loaders import WebBaseLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_milvus import Milvus
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter


def load_and_split_documents(config: dict[str, Any]):
    """
    Load and split documents from web URL
    """
    try:
        loader = WebBaseLoader(web_paths=(config["url"],))
        docs = loader.load()
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=config["chunk_size"],
            chunk_overlap=config["chunk_overlap"],
        )
        return splitter.split_documents(docs)
    except Exception as e:
        print(f"Error loading document from {config['url']}: {str(e)}")
        raise


def init_vectorstore(config: dict[str, Any], documents: list[Document]):
    """
    Initialize vector store with documents
    """
    embeddings = OpenAIEmbeddings(
        model=config["embedding_model"],
        openai_api_key=config["vllm_api_key"],
        openai_api_base=config["vllm_embedding_endpoint"],
    )
    return Milvus.from_documents(
        documents=documents,
        embedding=embeddings,
        connection_args={"uri": config["uri"]},
        drop_old=True,
    )


def init_llm(config: dict[str, Any]):
    """
    Initialize llm
    """
    return ChatOpenAI(
        model=config["chat_model"],
        openai_api_key=config["vllm_api_key"],
        openai_api_base=config["vllm_chat_endpoint"],
    )


def get_qa_prompt():
    """
    Get question answering prompt template
    """
    template = """You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say that you don't know.
Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:
"""
    return PromptTemplate.from_template(template)


def format_docs(docs: list[Document]):
    """
    Format documents for prompt
    """
    return "\n\n".join(doc.page_content for doc in docs)


def create_qa_chain(retriever: Any, llm: ChatOpenAI, prompt: PromptTemplate):
    """
    Set up question answering chain
    """
    return (
        {
            "context": retriever | format_docs,
            "question": RunnablePassthrough(),
        }
        | prompt
        | llm
        | StrOutputParser()
    )


def get_parser() -> argparse.ArgumentParser:
    """
    Parse command line arguments
    """
    parser = argparse.ArgumentParser(description="RAG with vLLM and langchain")

    # Add command line arguments
    parser.add_argument(
        "--vllm-api-key", default="EMPTY", help="API key for vLLM compatible services"
    )
    parser.add_argument(
        "--vllm-embedding-endpoint",
        default="http://localhost:8000/v1",
        help="Base URL for embedding service",
    )
    parser.add_argument(
        "--vllm-chat-endpoint",
        default="http://localhost:8001/v1",
        help="Base URL for chat service",
    )
    parser.add_argument("--uri", default="./milvus.db", help="URI for Milvus database")
    parser.add_argument(
        "--url",
        default=("https://docs.vllm.ai/en/latest/getting_started/quickstart.html"),
        help="URL of the document to process",
    )
    parser.add_argument(
        "--embedding-model",
        default="ssmits/Qwen2-7B-Instruct-embed-base",
        help="Model name for embeddings",
    )
    parser.add_argument(
        "--chat-model", default="qwen/Qwen1.5-0.5B-Chat", help="Model name for chat"
    )
    parser.add_argument(
        "-i", "--interactive", action="store_true", help="Enable interactive Q&A mode"
    )
    parser.add_argument(
        "-k", "--top-k", type=int, default=3, help="Number of top results to retrieve"
    )
    parser.add_argument(
        "-c",
        "--chunk-size",
        type=int,
        default=1000,
        help="Chunk size for document splitting",
    )
    parser.add_argument(
        "-o",
        "--chunk-overlap",
        type=int,
        default=200,
        help="Chunk overlap for document splitting",
    )
    return parser


def init_config(args: Namespace):
    """
    Initialize configuration settings from command line arguments
    """
    return {
        "vllm_api_key": args.vllm_api_key,
        "vllm_embedding_endpoint": args.vllm_embedding_endpoint,
        "vllm_chat_endpoint": args.vllm_chat_endpoint,
        "uri": args.uri,
        "embedding_model": args.embedding_model,
        "chat_model": args.chat_model,
        "url": args.url,
        "chunk_size": args.chunk_size,
        "chunk_overlap": args.chunk_overlap,
        "top_k": args.top_k,
    }


def main():
    # Parse command line arguments
    args = get_parser().parse_args()

    # Initialize configuration
    config = init_config(args)

    # Load and split documents
    documents = load_and_split_documents(config)

    # Initialize vector store and retriever
    vectorstore = init_vectorstore(config, documents)
    retriever = vectorstore.as_retriever(search_kwargs={"k": config["top_k"]})

    # Initialize llm and prompt
    llm = init_llm(config)
    prompt = get_qa_prompt()

    # Set up QA chain
    qa_chain = create_qa_chain(retriever, llm, prompt)

    # Interactive mode
    if args.interactive:
        print("\nWelcome to Interactive Q&A System!")
        print("Enter 'q' or 'quit' to exit.")
        while True:
            question = input("\nPlease enter your question: ")
            if question.lower() in ["q", "quit"]:
                print("\nThank you for using! Goodbye!")
                break
            output = qa_chain.invoke(question)
            print(output)
    else:
        # Default single question mode
        question = "How to install vLLM?"
        output = qa_chain.invoke(question)
        print("-" * 50)
        print(output)
        print("-" * 50)


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_with_reasoning.py
examples/online_serving/openai_chat_completion_with_reasoning.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ An example shows how to generate chat completions from reasoning models like DeepSeekR1. To run this example, you need to start the vLLM server with the reasoning parser: ```bash vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ --reasoning-parser deepseek_r1 ``` This example demonstrates how to generate chat completions from reasoning models using the OpenAI Python client library. """ from openai import OpenAI # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" def main(): client = OpenAI( api_key=openai_api_key, base_url=openai_api_base, ) models = client.models.list() model = models.data[0].id # Round 1 messages = [{"role": "user", "content": "9.11 and 9.8, which is greater?"}] # ruff: noqa: E501 # For granite, add: `extra_body={"chat_template_kwargs": {"thinking": True}}` response = client.chat.completions.create(model=model, messages=messages) reasoning = response.choices[0].message.reasoning content = response.choices[0].message.content print("reasoning for Round 1:", reasoning) print("content for Round 1:", content) # Round 2 messages.append({"role": "assistant", "content": content}) messages.append( { "role": "user", "content": "How many Rs are there in the word 'strawberry'?", } ) response = client.chat.completions.create(model=model, messages=messages) reasoning = response.choices[0].message.reasoning content = response.choices[0].message.content print("reasoning for Round 2:", reasoning) print("content for Round 2:", content) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/prompt_embed_inference_with_openai_client.py
examples/online_serving/prompt_embed_inference_with_openai_client.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
vLLM OpenAI-Compatible Client with Prompt Embeddings

This script demonstrates how to:
1. Generate prompt embeddings using Hugging Face Transformers
2. Encode them in base64 format
3. Send them to a vLLM server via the OpenAI-compatible Completions API

Run the vLLM server first:
    vllm serve meta-llama/Llama-3.2-1B-Instruct \
      --runner generate \
      --max-model-len 4096 \
      --enable-prompt-embeds

Run the client:
    python examples/online_serving/prompt_embed_inference_with_openai_client.py

Model: meta-llama/Llama-3.2-1B-Instruct
Note: This model is gated on Hugging Face Hub.
      You must request access to use it:
      https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct

Dependencies:
    - transformers
    - torch
    - openai
"""

import transformers
from openai import OpenAI

from vllm.utils.serial_utils import tensor2base64


def main():
    """Build prompt embeddings locally and submit them for completion."""
    client = OpenAI(
        api_key="EMPTY",
        base_url="http://localhost:8000/v1",
    )
    model_name = "meta-llama/Llama-3.2-1B-Instruct"

    # Transformers: tokenize the chat and embed the token ids locally.
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    transformers_model = transformers.AutoModelForCausalLM.from_pretrained(model_name)

    # Refer to the HuggingFace repo for the correct format to use
    chat = [{"role": "user", "content": "Please tell me about the capital of France."}]
    token_ids = tokenizer.apply_chat_template(
        chat, add_generation_prompt=True, return_tensors="pt"
    )

    embedding_layer = transformers_model.get_input_embeddings()
    prompt_embeds = embedding_layer(token_ids).squeeze(0)

    # Prompt embeddings, serialized for transport.
    encoded_embeds = tensor2base64(prompt_embeds)

    completion = client.completions.create(
        model=model_name,
        # NOTE: The OpenAI client does not allow `None` as an input to
        # `prompt`. Use an empty string if you have no text prompts.
        prompt="",
        max_tokens=5,
        temperature=0.0,
        # NOTE: The OpenAI client allows passing in extra JSON body via the
        # `extra_body` argument.
        extra_body={"prompt_embeds": encoded_embeds},
    )

    print("-" * 30)
    print(completion.choices[0].text)
    print("-" * 30)


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/multi_instance_data_parallel.py
examples/online_serving/multi_instance_data_parallel.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import threading

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.v1.metrics.loggers import AggregatedLoggingStatLogger

"""
To run this example, run the following commands simultaneously with
different CUDA_VISIBLE_DEVICES:
    python examples/online_serving/multi_instance_data_parallel.py

    vllm serve ibm-research/PowerMoE-3b -dp 2 -dpr 1 \
        --data-parallel-address 127.0.0.1 --data-parallel-rpc-port 62300 \
        --data-parallel-size-local 1 --enforce-eager --headless

Once both instances have completed the handshake, this example will send a
request to the instance with DP rank 1.
"""


def _do_background_logging(engine, interval, stop_event):
    """Periodically flush engine stats until *stop_event* is set."""
    try:
        while not stop_event.is_set():
            asyncio.run(engine.do_log_stats())
            stop_event.wait(interval)
    except Exception as e:
        print(f"vLLM background logging shutdown: {e}")
        pass


async def main():
    """Start an engine client and generate against DP rank 1."""
    engine_args = AsyncEngineArgs(
        model="ibm-research/PowerMoE-3b",
        data_parallel_size=2,
        tensor_parallel_size=1,
        dtype="auto",
        max_model_len=2048,
        data_parallel_address="127.0.0.1",
        data_parallel_rpc_port=62300,
        data_parallel_size_local=1,
        enforce_eager=True,
        enable_log_requests=True,
        disable_custom_all_reduce=True,
    )

    engine_client = AsyncLLMEngine.from_engine_args(
        engine_args,
        # Example: Using aggregated logger
        stat_loggers=[AggregatedLoggingStatLogger],
    )

    # Log stats from a daemon thread every 5 seconds until shutdown.
    stop_logging_event = threading.Event()
    logging_thread = threading.Thread(
        target=_do_background_logging,
        args=(engine_client, 5, stop_logging_event),
        daemon=True,
    )
    logging_thread.start()

    sampling_params = SamplingParams(
        temperature=0.7,
        top_p=0.9,
        max_tokens=100,
    )

    num_prompts = 10
    for i in range(num_prompts):
        prompt = "Who won the 2004 World Series?"
        final_output: RequestOutput | None = None
        async for output in engine_client.generate(
            prompt=prompt,
            sampling_params=sampling_params,
            request_id=f"abcdef-{i}",
            data_parallel_rank=1,
        ):
            final_output = output
        if final_output:
            print(final_output.outputs[0].text)

    stop_logging_event.set()
    logging_thread.join()


if __name__ == "__main__":
    asyncio.run(main())
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/utils.py
examples/online_serving/utils.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from openai import APIConnectionError, OpenAI from openai.pagination import SyncPage from openai.types.model import Model def get_first_model(client: OpenAI) -> str: """ Get the first model from the vLLM server. """ try: models: SyncPage[Model] = client.models.list() except APIConnectionError as e: raise RuntimeError( "Failed to get the list of models from the vLLM server at " f"{client.base_url} with API key {client.api_key}. Check\n" "1. the server is running\n" "2. the server URL is correct\n" "3. the API key is correct" ) from e if len(models.data) == 0: raise RuntimeError(f"No models found on the vLLM server at {client.base_url}") return models.data[0].id
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/gradio_openai_chatbot_webserver.py
examples/online_serving/gradio_openai_chatbot_webserver.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Example for starting a Gradio OpenAI Chatbot Webserver
Start vLLM API server:
    vllm serve meta-llama/Llama-2-7b-chat-hf

Start Gradio OpenAI Chatbot Webserver:
    python examples/online_serving/gradio_openai_chatbot_webserver.py \
                    -m meta-llama/Llama-2-7b-chat-hf

Note that `pip install --upgrade gradio` is needed to run this example.
More details: https://github.com/gradio-app/gradio

If your antivirus software blocks the download of frpc for gradio,
you can install it manually by following these steps:

1. Download this file: https://cdn-media.huggingface.co/frpc-gradio-0.3/frpc_linux_amd64
2. Rename the downloaded file to: frpc_linux_amd64_v0.3
3. Move the file to this location: /home/user/.cache/huggingface/gradio/frpc
"""

import argparse

import gradio as gr
from openai import OpenAI


def predict(message, history, client, model_name, temp, stop_token_ids):
    """Stream one chat turn from the vLLM server and return the full reply."""
    messages = [
        {"role": "system", "content": "You are a great AI assistant."},
        *history,
        {"role": "user", "content": message},
    ]

    # Send request to OpenAI API (vLLM server)
    stream = client.chat.completions.create(
        model=model_name,
        messages=messages,
        temperature=temp,
        stream=True,
        extra_body={
            "repetition_penalty": 1,
            "stop_token_ids": [int(id.strip()) for id in stop_token_ids.split(",")]
            if stop_token_ids
            else [],
        },
    )

    # Collect all chunks and concatenate them into a full message
    pieces = []
    for chunk in stream:
        pieces.append(chunk.choices[0].delta.content or "")

    # Return the full message as a single response
    return "".join(pieces)


def parse_args():
    """Define and evaluate the command-line interface."""
    parser = argparse.ArgumentParser(
        description="Chatbot Interface with Customizable Parameters"
    )
    parser.add_argument(
        "--model-url", type=str, default="http://localhost:8000/v1", help="Model URL"
    )
    parser.add_argument(
        "-m", "--model", type=str, required=True, help="Model name for the chatbot"
    )
    parser.add_argument(
        "--temp", type=float, default=0.8, help="Temperature for text generation"
    )
    parser.add_argument(
        "--stop-token-ids", type=str, default="", help="Comma-separated stop token IDs"
    )
    parser.add_argument("--host", type=str, default=None)
    parser.add_argument("--port", type=int, default=8001)
    return parser.parse_args()


def build_gradio_interface(client, model_name, temp, stop_token_ids):
    """Wrap `predict` in a Gradio ChatInterface bound to fixed settings."""

    def chat_predict(message, history):
        return predict(message, history, client, model_name, temp, stop_token_ids)

    return gr.ChatInterface(
        fn=chat_predict,
        title="Chatbot Interface",
        description="A simple chatbot powered by vLLM",
    )


def main():
    # Parse the arguments
    args = parse_args()

    # Set OpenAI's API key and API base to use vLLM's API server
    openai_api_key = "EMPTY"
    openai_api_base = args.model_url

    # Create an OpenAI client
    client = OpenAI(api_key=openai_api_key, base_url=openai_api_base)

    # Define the Gradio chatbot interface using the predict function
    gradio_interface = build_gradio_interface(
        client, args.model, args.temp, args.stop_token_ids
    )

    gradio_interface.queue().launch(
        server_name=args.host, server_port=args.port, share=True
    )


if __name__ == "__main__":
    main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_client.py
examples/online_serving/openai_chat_completion_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Example Python client for OpenAI Chat Completion using vLLM API server NOTE: start a supported chat completion model server with `vllm serve`, e.g. vllm serve meta-llama/Llama-2-7b-chat-hf """ import argparse from openai import OpenAI # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" messages = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who won the world series in 2020?"}, { "role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020.", }, {"role": "user", "content": "Where was it played?"}, ] def parse_args(): parser = argparse.ArgumentParser(description="Client for vLLM API server") parser.add_argument( "--stream", action="store_true", help="Enable streaming response" ) return parser.parse_args() def main(args): client = OpenAI( # defaults to os.environ.get("OPENAI_API_KEY") api_key=openai_api_key, base_url=openai_api_base, ) models = client.models.list() model = models.data[0].id # Chat Completion API chat_completion = client.chat.completions.create( messages=messages, model=model, stream=args.stream, ) print("-" * 50) print("Chat completion results:") if args.stream: for c in chat_completion: print(c) else: print(chat_completion) print("-" * 50) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_client_for_multimodal.py
examples/online_serving/openai_chat_completion_client_for_multimodal.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """An example showing how to use vLLM to serve multimodal models and run online serving with OpenAI client. Launch the vLLM server with the following command: (single image inference with Llava) vllm serve llava-hf/llava-1.5-7b-hf (multi-image inference with Phi-3.5-vision-instruct) vllm serve microsoft/Phi-3.5-vision-instruct --runner generate \ --trust-remote-code --max-model-len 4096 --limit-mm-per-prompt '{"image":2}' (audio inference with Ultravox) vllm serve fixie-ai/ultravox-v0_5-llama-3_2-1b \ --max-model-len 4096 --trust-remote-code run the script with python openai_chat_completion_client_for_multimodal.py --chat-type audio """ import base64 import os import requests from openai import OpenAI from utils import get_first_model from vllm.utils.argparse_utils import FlexibleArgumentParser # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" client = OpenAI( # defaults to os.environ.get("OPENAI_API_KEY") api_key=openai_api_key, base_url=openai_api_base, ) headers = {"User-Agent": "vLLM Example Client"} def encode_base64_content_from_url(content_url: str) -> str: """Encode a content retrieved from a remote url to base64 format.""" with requests.get(content_url, headers=headers) as response: response.raise_for_status() result = base64.b64encode(response.content).decode("utf-8") return result def encode_base64_content_from_file(file_path: str) -> str: """Encode a local file content to base64 format.""" with open(file_path, "rb") as file: file_content = file.read() result = base64.b64encode(file_content).decode("utf-8") return result # Text-only inference def run_text_only(model: str, max_completion_tokens: int) -> None: chat_completion = client.chat.completions.create( messages=[{"role": "user", "content": "What's the capital of France?"}], model=model, 
max_completion_tokens=max_completion_tokens, ) result = chat_completion.choices[0].message.content print("Chat completion output:\n", result) # Single-image input inference def run_single_image(model: str, max_completion_tokens: int) -> None: ## Use image url in the payload image_url = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" image_file = "/path/to/image.jpg" # local file chat_completion_from_url = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this image?"}, { "type": "image_url", "image_url": {"url": image_url}, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_url.choices[0].message.content print("Chat completion output from image url:\n", result) ## Use local image url in the payload # Launch the API server/engine with the --allowed-local-media-path argument. if os.path.exists(image_file): chat_completion_from_local_image_url = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this image?"}, { "type": "image_url", "image_url": {"url": f"file://{image_file}"}, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_local_image_url.choices[0].message.content print("Chat completion output from local image file:\n", result) else: print(f"Local image file not found at {image_file}, skipping local file test.") ## Use base64 encoded image in the payload image_base64 = encode_base64_content_from_url(image_url) chat_completion_from_base64 = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this image?"}, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = 
chat_completion_from_base64.choices[0].message.content print("Chat completion output from base64 encoded image:", result) ## Use base64 encoded local image in the payload if os.path.exists(image_file): local_image_base64 = encode_base64_content_from_file(image_file) chat_completion_from_local_image_base64 = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this image?"}, { "type": "image_url", "image_url": { "url": f"data:image/jpeg;base64,{local_image_base64}" }, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_local_image_base64.choices[0].message.content print("Chat completion output from base64 encoded local image:", result) else: print(f"Local image file not found at {image_file}, skipping local file test.") # Multi-image input inference def run_multi_image(model: str, max_completion_tokens: int) -> None: image_url_duck = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/duck.jpg" image_url_lion = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/lion.jpg" chat_completion_from_url = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What are the animals in these images?"}, { "type": "image_url", "image_url": {"url": image_url_duck}, }, { "type": "image_url", "image_url": {"url": image_url_lion}, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_url.choices[0].message.content print("Chat completion output:\n", result) # Video input inference def run_video(model: str, max_completion_tokens: int) -> None: video_url = "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4" video_base64 = encode_base64_content_from_url(video_url) ## Use video url in the payload chat_completion_from_url = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": 
"What's in this video?"}, { "type": "video_url", "video_url": {"url": video_url}, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_url.choices[0].message.content print("Chat completion output from video url:\n", result) ## Use base64 encoded video in the payload chat_completion_from_base64 = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this video?"}, { "type": "video_url", "video_url": {"url": f"data:video/mp4;base64,{video_base64}"}, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_base64.choices[0].message.content print("Chat completion output from base64 encoded video:\n", result) # Audio input inference def run_audio(model: str, max_completion_tokens: int) -> None: from vllm.assets.audio import AudioAsset audio_url = AudioAsset("winning_call").url audio_base64 = encode_base64_content_from_url(audio_url) # OpenAI-compatible schema (`input_audio`) chat_completion_from_base64 = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this audio?"}, { "type": "input_audio", "input_audio": { # Any format supported by librosa is supported "data": audio_base64, "format": "wav", }, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_base64.choices[0].message.content print("Chat completion output from input audio:\n", result) # HTTP URL chat_completion_from_url = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this audio?"}, { "type": "audio_url", "audio_url": { # Any format supported by librosa is supported "url": audio_url }, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_url.choices[0].message.content print("Chat completion output from audio url:\n", result) # base64 URL 
chat_completion_from_base64 = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "What's in this audio?"}, { "type": "audio_url", "audio_url": { # Any format supported by librosa is supported "url": f"data:audio/ogg;base64,{audio_base64}" }, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_base64.choices[0].message.content print("Chat completion output from base64 encoded audio:\n", result) def run_multi_audio(model: str, max_completion_tokens: int) -> None: from vllm.assets.audio import AudioAsset # Two different audios to showcase batched inference. audio_url = AudioAsset("winning_call").url audio_base64 = encode_base64_content_from_url(audio_url) audio_url2 = AudioAsset("azacinto_foscolo").url audio_base64_2 = encode_base64_content_from_url(audio_url2) # OpenAI-compatible schema (`input_audio`) chat_completion_from_base64 = client.chat.completions.create( messages=[ { "role": "user", "content": [ {"type": "text", "text": "Are these two audios the same?"}, { "type": "input_audio", "input_audio": { "data": audio_base64, "format": "wav", }, }, { "type": "input_audio", "input_audio": { "data": audio_base64_2, "format": "wav", }, }, ], } ], model=model, max_completion_tokens=max_completion_tokens, ) result = chat_completion_from_base64.choices[0].message.content print("Chat completion output from input audio:\n", result) example_function_map = { "text-only": run_text_only, "single-image": run_single_image, "multi-image": run_multi_image, "multi-audio": run_multi_audio, "video": run_video, "audio": run_audio, } def parse_args(): parser = FlexibleArgumentParser( description="Demo on using OpenAI client for online serving with " "multimodal language models served with vLLM." 
) parser.add_argument( "--chat-type", "-c", type=str, default="single-image", choices=list(example_function_map.keys()), help="Conversation type with multimodal data.", ) parser.add_argument( "--max-completion-tokens", "-n", type=int, default=128, help="Maximum number of tokens to generate for each completion.", ) return parser.parse_args() def main(args) -> None: chat_type = args.chat_type model = get_first_model(client) example_function_map[chat_type](model, args.max_completion_tokens) if __name__ == "__main__": args = parse_args() main(args)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_with_reasoning_streaming.py
examples/online_serving/openai_chat_completion_with_reasoning_streaming.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ An example shows how to generate chat completions from reasoning models like DeepSeekR1. To run this example, you need to start the vLLM server with the reasoning parser: ```bash vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ --reasoning-parser deepseek_r1 ``` Unlike openai_chat_completion_with_reasoning.py, this example demonstrates the streaming chat completions feature. The streaming chat completions feature allows you to receive chat completions in real-time as they are generated by the model. This is useful for scenarios where you want to display chat completions to the user as they are generated by the model. Remember to check content and reasoning exist in `ChatCompletionChunk`, content may not exist leading to errors if you try to access it. """ from openai import OpenAI # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" messages = [{"role": "user", "content": "9.11 and 9.8, which is greater?"}] def main(): client = OpenAI( api_key=openai_api_key, base_url=openai_api_base, ) models = client.models.list() model = models.data[0].id # ruff: noqa: E501 # For granite: add: `extra_body={"chat_template_kwargs": {"thinking": True}}` stream = client.chat.completions.create(model=model, messages=messages, stream=True) print("client: Start streaming chat completions...") printed_reasoning = False printed_content = False for chunk in stream: # Safely extract reasoning and content from delta, # defaulting to None if attributes don't exist or are empty strings reasoning = getattr(chunk.choices[0].delta, "reasoning", None) or None content = getattr(chunk.choices[0].delta, "content", None) or None if reasoning is not None: if not printed_reasoning: printed_reasoning = True print("reasoning:", end="", flush=True) print(reasoning, end="", flush=True) elif content is not None: 
if not printed_content: printed_content = True print("\ncontent:", end="", flush=True) # Extract and print the content print(content, end="", flush=True) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_client_with_tools.py
examples/online_serving/openai_chat_completion_client_with_tools.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ Set up this example by starting a vLLM OpenAI-compatible server with tool call options enabled. For example: IMPORTANT: for mistral, you must use one of the provided mistral tool call templates, or your own - the model default doesn't work for tool calls with vLLM See the vLLM docs on OpenAI server & tool calling for more details. vllm serve mistralai/Mistral-7B-Instruct-v0.3 \ --chat-template examples/tool_chat_template_mistral.jinja \ --enable-auto-tool-choice --tool-call-parser mistral OR vllm serve NousResearch/Hermes-2-Pro-Llama-3-8B \ --chat-template examples/tool_chat_template_hermes.jinja \ --enable-auto-tool-choice --tool-call-parser hermes """ import json from typing import Any from openai import OpenAI # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" properties = { "city": { "type": "string", "description": "The city to find the weather for, e.g. 'San Francisco'", }, "state": { "type": "string", "description": "the two-letter abbreviation for the state that the city is" " in, e.g. 'CA' which would mean 'California'", }, "unit": { "type": "string", "description": "The unit to fetch the temperature in", "enum": ["celsius", "fahrenheit"], }, } tools = [ { "type": "function", "function": { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { "type": "object", "properties": properties, "required": ["city", "state", "unit"], }, }, } ] messages = [ {"role": "user", "content": "Hi! How are you doing today?"}, {"role": "assistant", "content": "I'm doing well! How can I help you?"}, { "role": "user", "content": ( "Can you tell me what the temperate will be in Dallas, in fahrenheit?" ), }, ] def get_current_weather(city: str, state: str, unit: "str"): return ( "The weather in Dallas, Texas is 85 degrees fahrenheit. 
It is " "partly cloudly, with highs in the 90's." ) def handle_tool_calls_stream( client: OpenAI, messages: list[dict[str, str]], model: str, tools: list[dict[str, Any]], ) -> list[Any]: tool_calls_stream = client.chat.completions.create( messages=messages, model=model, tools=tools, stream=True ) chunks = [] print("chunks: ") for chunk in tool_calls_stream: chunks.append(chunk) if chunk.choices[0].delta.tool_calls: print(chunk.choices[0].delta.tool_calls[0]) else: print(chunk.choices[0].delta) return chunks def handle_tool_calls_arguments(chunks: list[Any]) -> list[str]: arguments = [] tool_call_idx = -1 print("arguments: ") for chunk in chunks: if chunk.choices[0].delta.tool_calls: tool_call = chunk.choices[0].delta.tool_calls[0] if tool_call.index != tool_call_idx: if tool_call_idx >= 0: print(f"streamed tool call arguments: {arguments[tool_call_idx]}") tool_call_idx = chunk.choices[0].delta.tool_calls[0].index arguments.append("") if tool_call.id: print(f"streamed tool call id: {tool_call.id} ") if tool_call.function: if tool_call.function.name: print(f"streamed tool call name: {tool_call.function.name}") if tool_call.function.arguments: arguments[tool_call_idx] += tool_call.function.arguments return arguments def main(): # Initialize OpenAI client client = OpenAI( # defaults to os.environ.get("OPENAI_API_KEY") api_key=openai_api_key, base_url=openai_api_base, ) # Get available models and select one models = client.models.list() model = models.data[0].id chat_completion = client.chat.completions.create( messages=messages, model=model, tools=tools ) print("-" * 70) print("Chat completion results:") print(chat_completion) print("-" * 70) # Stream tool calls chunks = handle_tool_calls_stream(client, messages, model, tools) print("-" * 70) # Handle arguments from streamed tool calls arguments = handle_tool_calls_arguments(chunks) if len(arguments): print(f"streamed tool call arguments: {arguments[-1]}\n") print("-" * 70) # Add tool call results to the conversation 
messages.append( { "role": "assistant", "tool_calls": chat_completion.choices[0].message.tool_calls, "reasoning": chat_completion.choices[0].message.reasoning, } ) # Now, simulate a tool call available_tools = {"get_current_weather": get_current_weather} completion_tool_calls = chat_completion.choices[0].message.tool_calls for call in completion_tool_calls: tool_to_call = available_tools[call.function.name] args = json.loads(call.function.arguments) result = tool_to_call(**args) print("tool_to_call result: ", result) messages.append( { "role": "tool", "content": result, "tool_call_id": call.id, "name": call.function.name, } ) chat_completion_2 = client.chat.completions.create( messages=messages, model=model, tools=tools, stream=False ) print("Chat completion2 results:") print(chat_completion_2) print("-" * 70) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/kv_events_subscriber.py
examples/online_serving/kv_events_subscriber.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from typing import Any import msgspec import zmq from msgspec.msgpack import Decoder from vllm.v1.core.kv_cache_utils import ExternalBlockHash # # Types copied from vllm.distributed.kv_events # class EventBatch(msgspec.Struct, array_like=True, omit_defaults=True, gc=False): ts: float events: list[Any] class KVCacheEvent( msgspec.Struct, array_like=True, omit_defaults=True, gc=False, tag=True ): """Base class for all KV cache-related events""" class BlockStored(KVCacheEvent): block_hashes: list[ExternalBlockHash] parent_block_hash: ExternalBlockHash | None token_ids: list[int] block_size: int lora_id: int | None """Deprecated: use `lora_name` for KV block key hash. Retained for backward compatibility. """ medium: str | None lora_name: str | None class BlockRemoved(KVCacheEvent): block_hashes: list[ExternalBlockHash] medium: str | None class AllBlocksCleared(KVCacheEvent): pass class KVEventBatch(EventBatch): events: list[BlockStored | BlockRemoved | AllBlocksCleared] def process_event(event_batch): print(f"Received event batch at {event_batch.ts}:") for event in event_batch.events: print(f" - {event}") def main(): decoder = Decoder(type=KVEventBatch) last_seq = -1 context = zmq.Context() # Set up the main subscription socket sub = context.socket(zmq.SUB) sub.connect("tcp://localhost:5557") topic = "kv-events" sub.setsockopt_string(zmq.SUBSCRIBE, topic) # Initialize replay socket replay = context.socket(zmq.REQ) replay.connect("tcp://localhost:5558") poller = zmq.Poller() poller.register(replay, zmq.POLLIN) print("Listening for KV cache events on topic:", topic) while True: try: if sub.poll(50): _, seq_bytes, payload = sub.recv_multipart() seq = int.from_bytes(seq_bytes, "big") if last_seq >= 0 and seq > last_seq + 1: missed = seq - last_seq - 1 print( f"Missed {missed} messages (last: {last_seq}, current: {seq})" ) replay.send((last_seq + 1).to_bytes(8, "big")) 
while poller.poll(timeout=200): seq_bytes, replay_payload = replay.recv_multipart() if not replay_payload: # End of replay marker is sent as an empty frame # for the payload break replay_seq = int.from_bytes(seq_bytes, "big") if replay_seq > last_seq: event_batch = decoder.decode(replay_payload) process_event(event_batch) last_seq = replay_seq if replay_seq >= seq - 1: break event_batch = decoder.decode(payload) process_event(event_batch) # ... do other periodic work or check for shutdown ... except KeyboardInterrupt: print("Interrupted") break except Exception as e: print("Error decoding message:", e) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/token_generation_client.py
examples/online_serving/token_generation_client.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import httpx from transformers import AutoTokenizer GEN_ENDPOINT = "http://localhost:8000/inference/v1/generate" DUMMY_API_KEY = "empty" MODEL_NAME = "Qwen/Qwen3-0.6B" transport = httpx.HTTPTransport() headers = {"Authorization": f"Bearer {DUMMY_API_KEY}"} client = httpx.Client( transport=transport, base_url=GEN_ENDPOINT, timeout=600, headers=headers, ) messages = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "How many countries are in the EU?"}, ] def main(client): tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME) token_ids = tokenizer.apply_chat_template( messages, add_generation_prompt=True, enable_thinking=False, ) payload = { "model": MODEL_NAME, "token_ids": token_ids, "sampling_params": {"max_tokens": 24, "temperature": 0.2, "detokenize": False}, "stream": False, } resp = client.post(GEN_ENDPOINT, json=payload) resp.raise_for_status() data = resp.json() print(data) print("-" * 50) print("Token generation results:") res = tokenizer.decode(data["choices"][0]["token_ids"]) print(res) print("-" * 50) if __name__ == "__main__": main(client)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/openai_chat_completion_tool_calls_with_reasoning.py
examples/online_serving/openai_chat_completion_tool_calls_with_reasoning.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ An example demonstrates how to use tool calling with reasoning models like QwQ-32B. The reasoning will not be parsed by the tool calling process; only the final output will be parsed. To run this example, you need to start the vLLM server with both the reasoning parser and tool calling enabled. ```bash vllm serve Qwen/QwQ-32B \ --reasoning-parser deepseek_r1 \ --enable-auto-tool-choice --tool-call-parser hermes ``` """ from openai import OpenAI # Now, simulate a tool call def get_current_weather(city: str, state: str, unit: "str"): return ( "The weather in Dallas, Texas is 85 degrees fahrenheit. It is " "partly cloudly, with highs in the 90's." ) available_tools = {"get_current_weather": get_current_weather} # Modify OpenAI's API key and API base to use vLLM's API server. openai_api_key = "EMPTY" openai_api_base = "http://localhost:8000/v1" properties = { "city": { "type": "string", "description": "The city to find the weather for, e.g. 'San Francisco'", }, "state": { "type": "string", "description": "the two-letter abbreviation for the state that the city is" " in, e.g. 'CA' which would mean 'California'", }, "unit": { "type": "string", "description": "The unit to fetch the temperature in", "enum": ["celsius", "fahrenheit"], }, } tools = [ { "type": "function", "function": { "name": "get_current_weather", "description": "Get the current weather in a given location", "parameters": { "type": "object", "properties": properties, "required": ["city", "state", "unit"], }, }, } ] messages = [ {"role": "user", "content": "Hi! How are you doing today?"}, {"role": "assistant", "content": "I'm doing well! How can I help you?"}, { "role": "user", "content": ( "Can you tell me what the temperate will be in Dallas, in fahrenheit?" 
), }, ] def extract_reasoning_and_calls(chunks: list): reasoning = "" tool_call_idx = -1 arguments = [] function_names = [] for chunk in chunks: if chunk.choices[0].delta.tool_calls: tool_call = chunk.choices[0].delta.tool_calls[0] if tool_call.index != tool_call_idx: tool_call_idx = chunk.choices[0].delta.tool_calls[0].index arguments.append("") function_names.append("") if tool_call.function: if tool_call.function.name: function_names[tool_call_idx] = tool_call.function.name if tool_call.function.arguments: arguments[tool_call_idx] += tool_call.function.arguments else: if hasattr(chunk.choices[0].delta, "reasoning"): reasoning += chunk.choices[0].delta.reasoning return reasoning, arguments, function_names def main(): client = OpenAI( api_key=openai_api_key, base_url=openai_api_base, ) models = client.models.list() model = models.data[0].id print("---------Full Generate With Automatic Function Calling-------------") tool_calls = client.chat.completions.create( messages=messages, model=model, tools=tools ) print(f"reasoning: {tool_calls.choices[0].message.reasoning}") print(f"function name: {tool_calls.choices[0].message.tool_calls[0].function.name}") print( f"function arguments: " f"{tool_calls.choices[0].message.tool_calls[0].function.arguments}" ) print("----------Stream Generate With Automatic Function Calling-----------") tool_calls_stream = client.chat.completions.create( messages=messages, model=model, tools=tools, stream=True ) chunks = list(tool_calls_stream) reasoning, arguments, function_names = extract_reasoning_and_calls(chunks) print(f"reasoning: {reasoning}") print(f"function name: {function_names[0]}") print(f"function arguments: {arguments[0]}") print("----------Full Generate With Named Function Calling-----------------") tool_calls = client.chat.completions.create( messages=messages, model=model, tools=tools, tool_choice={"type": "function", "function": {"name": "get_current_weather"}}, ) tool_call = 
tool_calls.choices[0].message.tool_calls[0].function print(f"reasoning: {tool_calls.choices[0].message.reasoning}") print(f"function name: {tool_call.name}") print(f"function arguments: {tool_call.arguments}") print("----------Stream Generate With Named Function Calling--------------") tool_calls_stream = client.chat.completions.create( messages=messages, model=model, tools=tools, tool_choice={"type": "function", "function": {"name": "get_current_weather"}}, stream=True, ) chunks = list(tool_calls_stream) reasoning, arguments, function_names = extract_reasoning_and_calls(chunks) print(f"reasoning: {reasoning}") print(f"function name: {function_names[0]}") print(f"function arguments: {arguments[0]}") print("\n\n") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/structured_outputs/structured_outputs.py
examples/online_serving/structured_outputs/structured_outputs.py
# ruff: noqa: E501 # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import asyncio import enum import os from typing import Any, Literal import openai import pydantic from openai.types.chat import ChatCompletionChunk ConstraintsFormat = Literal[ "choice", "regex", "json", "grammar", "structural_tag", ] async def print_stream_response( stream_response: openai.AsyncStream[ChatCompletionChunk], title: str, args: argparse.Namespace, ): print(f"\n\n{title} (Streaming):") local_reasoning_header_printed = False local_content_header_printed = False async for chunk in stream_response: delta = chunk.choices[0].delta reasoning_chunk_text: str | None = getattr(delta, "reasoning", None) content_chunk_text = delta.content if args.reasoning: if reasoning_chunk_text: if not local_reasoning_header_printed: print(" Reasoning: ", end="") local_reasoning_header_printed = True print(reasoning_chunk_text, end="", flush=True) if content_chunk_text: if not local_content_header_printed: if local_reasoning_header_printed: print() print(" Content: ", end="") local_content_header_printed = True print(content_chunk_text, end="", flush=True) else: if content_chunk_text: if not local_content_header_printed: print(" Content: ", end="") local_content_header_printed = True print(content_chunk_text, end="", flush=True) print() class CarType(str, enum.Enum): SEDAN = "SEDAN" SUV = "SUV" TRUCK = "TRUCK" COUPE = "COUPE" class CarDescription(pydantic.BaseModel): brand: str model: str car_type: CarType PARAMS: dict[ConstraintsFormat, dict[str, Any]] = { "choice": { "messages": [ { "role": "user", "content": "Classify this sentiment: vLLM is wonderful!", } ], "extra_body": {"structured_outputs": {"choice": ["positive", "negative"]}}, }, "regex": { "messages": [ { "role": "user", "content": "Generate an email address for Alan Turing, who works in Enigma. End in .com and new line. 
Example result: 'alan.turing@enigma.com\n'", } ], "extra_body": { "structured_outputs": {"regex": r"[a-z0-9.]{1,20}@\w{6,10}\.com\n"}, }, }, "json": { "messages": [ { "role": "user", "content": "Generate a JSON with the brand, model and car_type of the most iconic car from the 90's", } ], "response_format": { "type": "json_schema", "json_schema": { "name": "car-description", "schema": CarDescription.model_json_schema(), }, }, }, "grammar": { "messages": [ { "role": "user", "content": "Generate an SQL query to show the 'username' and 'email' from the 'users' table.", } ], "extra_body": { "structured_outputs": { "grammar": """ root ::= select_statement select_statement ::= "SELECT " column " from " table " where " condition column ::= "col_1 " | "col_2 " table ::= "table_1 " | "table_2 " condition ::= column "= " number number ::= "1 " | "2 " """, } }, }, "structural_tag": { "messages": [ { "role": "user", "content": """ You have access to the following function to retrieve the weather in a city: { "name": "get_weather", "parameters": { "city": { "param_type": "string", "description": "The city to get the weather for", "required": True } } } If a you choose to call a function ONLY reply in the following format: <{start_tag}={function_name}>{parameters}{end_tag} where start_tag => `<function` parameters => a JSON dict with the function argument name as key and function argument value as value. end_tag => `</function>` Here is an example, <function=example_function_name>{"example_name": "example_value"}</function> Reminder: - Function calls MUST follow the specified format - Required parameters MUST be specified - Only call one function at a time - Put the entire function call reply on one line - Always add your sources when using search results to answer the user query You are a helpful assistant. 
Given the previous instructions, what is the weather in New York City, Boston, and San Francisco?""", }, ], "response_format": { "type": "structural_tag", "structures": [ { "begin": "<function=get_weather>", "schema": { "type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"], }, "end": "</function>", } ], "triggers": ["<function="], }, }, } async def cli(): parser = argparse.ArgumentParser( description="Run OpenAI Chat Completion with various structured outputs capabilities", ) _ = parser.add_argument( "--constraint", type=str, nargs="+", choices=[*list(PARAMS), "*"], default=["*"], help="Specify which constraint(s) to run.", ) _ = parser.add_argument( "--stream", action=argparse.BooleanOptionalAction, default=False, help="Enable streaming output", ) _ = parser.add_argument( "--reasoning", action=argparse.BooleanOptionalAction, default=False, help="Enable printing of reasoning traces if available.", ) args = parser.parse_args() base_url = os.getenv("OPENAI_BASE_URL", "http://localhost:8000/v1") client = openai.AsyncOpenAI(base_url=base_url, api_key="EMPTY") constraints = list(PARAMS) if "*" in args.constraint else list(set(args.constraint)) model = (await client.models.list()).data[0].id if args.stream: results = await asyncio.gather( *[ client.chat.completions.create( model=model, max_tokens=1024, stream=True, **PARAMS[name], ) for name in constraints ] ) for constraint, stream in zip(constraints, results): await print_stream_response(stream, constraint, args) else: results = await asyncio.gather( *[ client.chat.completions.create( model=model, max_tokens=1024, stream=False, **PARAMS[name], ) for name in constraints ] ) for constraint, response in zip(constraints, results): print(f"\n\n{constraint}:") message = response.choices[0].message if args.reasoning and hasattr(message, "reasoning"): print(f" Reasoning: {message.reasoning or ''}") print(f" Content: {message.content!r}") def main(): asyncio.run(cli()) if __name__ == "__main__": 
main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/elastic_ep/scale.py
examples/online_serving/elastic_ep/scale.py
#!/usr/bin/env python3 # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse import json import sys import requests def scale(host, port, new_dp_size): url = f"http://{host}:{port}/scale_elastic_ep" payload = {"new_data_parallel_size": new_dp_size} headers = {"Content-Type": "application/json"} print(f"Sending scale request to {url}") print(f"Payload: {json.dumps(payload, indent=2)}") try: response = requests.post(url, json=payload, headers=headers, timeout=300) print(f"Status Code: {response.status_code}") print(f"Response: {response.text}") if response.status_code == 200: print("Scale up/down request successful!") return True else: print("Scale up/down request failed!") return False except requests.exceptions.RequestException as e: print(f"Request failed: {e}") return False def main(): parser = argparse.ArgumentParser(description="Test scale up/down functionality") parser.add_argument("--host", default="localhost", help="API server host") parser.add_argument("--port", type=int, default=8006, help="API server port") parser.add_argument( "--new-dp-size", type=int, default=2, help="New data parallel size" ) args = parser.parse_args() success = scale(args.host, args.port, args.new_dp_size) sys.exit(0 if success else 1) if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/disaggregated_serving_p2p_nccl_xpyd/disagg_proxy_p2p_nccl_xpyd.py
examples/online_serving/disaggregated_serving_p2p_nccl_xpyd/disagg_proxy_p2p_nccl_xpyd.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""XpYd disaggregated-prefill proxy (P2P NCCL KV transfer).

vLLM prefill ("P") and decode ("D") workers register themselves over a ZMQ
ROUTER socket; the Quart HTTP front end round-robins each request across the
registered instances, first running a 1-token prefill pass and then streaming
the decode output back to the client.
"""
import os
import socket
import threading
import time
import uuid
from typing import Any

import aiohttp
import msgpack
import zmq
from quart import Quart, make_response, request

count = 0
prefill_instances: dict[str, Any] = {}  # http_address: (zmq_address, stamp)
decode_instances: dict[str, Any] = {}  # http_address: (zmq_address, stamp)

prefill_cv = threading.Condition()
decode_cv = threading.Condition()

# How long (seconds) a registration stays valid before it expires; workers
# are expected to re-ping within this window.
DEFAULT_PING_SECONDS = 5


def _remove_oldest_instances(instances: dict[str, Any]) -> None:
    """Drop expired registrations from *instances*, oldest first.

    Dict insertion order doubles as age order: entries are re-inserted on
    every ping, so the first key is always the stalest one and we can stop
    at the first still-fresh entry.
    """
    oldest_key = next(iter(instances), None)
    while oldest_key is not None:
        value = instances[oldest_key]
        if value[1] > time.time():
            break
        print(f"🔴Remove [HTTP:{oldest_key}, ZMQ:{value[0]}, stamp:{value[1]}]")
        instances.pop(oldest_key, None)
        oldest_key = next(iter(instances), None)


def _listen_for_register(poller, router_socket):
    """Background loop: accept P/D registration pings over ZMQ.

    Each message is a msgpack dict:
    {"type": "P"|"D", "http_address": "ip:port", "zmq_address": "ip:port"}
    """
    while True:
        socks = dict(poller.poll())
        if router_socket not in socks:
            continue
        remote_address, message = router_socket.recv_multipart()
        data = msgpack.loads(message)
        if data["type"] == "P":
            with prefill_cv:
                node = prefill_instances.get(data["http_address"], None)
                prefill_instances[data["http_address"]] = (
                    data["zmq_address"],
                    time.time() + DEFAULT_PING_SECONDS,
                )
                _remove_oldest_instances(prefill_instances)
        elif data["type"] == "D":
            with decode_cv:
                node = decode_instances.get(data["http_address"], None)
                decode_instances[data["http_address"]] = (
                    data["zmq_address"],
                    time.time() + DEFAULT_PING_SECONDS,
                )
                _remove_oldest_instances(decode_instances)
        else:
            # Fix: print() does not %-format its arguments, and the previous
            # bare `return` killed service discovery on the first malformed
            # message. Log and keep listening instead.
            print(f"Unexpected, Received message from {remote_address}, data: {data}")
            continue

        if node is None:
            print(f"🔵Add [HTTP:{data['http_address']}, ZMQ:{data['zmq_address']}]")


def start_service_discovery(hostname, port):
    """Bind a ZMQ ROUTER socket and start the registration listener thread.

    Returns the (daemon) listener thread so callers can join() on it.
    """
    if not hostname:
        hostname = socket.gethostname()
    if port == 0:
        raise ValueError("Port cannot be 0")

    context = zmq.Context()
    router_socket = context.socket(zmq.ROUTER)
    router_socket.bind(f"tcp://{hostname}:{port}")

    poller = zmq.Poller()
    poller.register(router_socket, zmq.POLLIN)

    _listener_thread = threading.Thread(
        target=_listen_for_register, args=[poller, router_socket], daemon=True
    )
    _listener_thread.start()
    return _listener_thread


AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60)

app = Quart(__name__)


def random_uuid() -> str:
    """Return a 32-character hex request-id component."""
    # uuid4().hex is already a str; no extra str() needed.
    return uuid.uuid4().hex


async def forward_request(url, data, request_id):
    """POST *data* to *url* and yield the response body in 1 KiB chunks.

    Non-200 responses yield nothing; the caller just sees an empty stream.
    """
    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        headers = {
            "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
            "X-Request-Id": request_id,
        }
        async with session.post(url=url, json=data, headers=headers) as response:
            if response.status == 200:
                async for chunk_bytes in response.content.iter_chunked(1024):
                    yield chunk_bytes


@app.route("/v1/completions", methods=["POST"])
@app.route("/v1/chat/completions", methods=["POST"])
async def handle_request():
    """Proxy one request: run a 1-token prefill, then stream the decode."""
    global count
    try:
        original_request_data = await request.get_json()

        prefill_request = original_request_data.copy()
        # change max_tokens = 1 to let it only do prefill
        prefill_request["max_tokens"] = 1
        if "max_completion_tokens" in prefill_request:
            prefill_request["max_completion_tokens"] = 1

        with prefill_cv:
            prefill_list = list(prefill_instances.items())
        with decode_cv:
            decode_list = list(decode_instances.items())

        # Robustness fix: fail with a clear 503 instead of ZeroDivisionError
        # when no instance has registered (yet).
        if not prefill_list or not decode_list:
            return {"error": "no prefill/decode instance registered"}, 503

        prefill_addr, prefill_zmq_addr = prefill_list[count % len(prefill_list)]
        prefill_zmq_addr = prefill_zmq_addr[0]
        decode_addr, decode_zmq_addr = decode_list[count % len(decode_list)]
        decode_zmq_addr = decode_zmq_addr[0]

        print(
            f"handle_request count: {count}, [HTTP:{prefill_addr}, "
            f"ZMQ:{prefill_zmq_addr}] 👉 [HTTP:{decode_addr}, "
            f"ZMQ:{decode_zmq_addr}]"
        )
        count += 1

        request_id = (
            f"___prefill_addr_{prefill_zmq_addr}___decode_addr_"
            f"{decode_zmq_addr}_{random_uuid()}"
        )

        # finish prefill (drain the stream; the output itself is discarded)
        async for _ in forward_request(
            f"http://{prefill_addr}{request.path}", prefill_request, request_id
        ):
            continue

        # return decode
        generator = forward_request(
            f"http://{decode_addr}{request.path}", original_request_data, request_id
        )
        response = await make_response(generator)
        response.timeout = None
        return response

    except Exception as e:
        import sys
        import traceback

        exc_info = sys.exc_info()
        print("Error occurred in disagg prefill proxy server")
        print(e)
        print("".join(traceback.format_exception(*exc_info)))


if __name__ == "__main__":
    t = start_service_discovery("0.0.0.0", 30001)
    app.run(host="0.0.0.0", port=10001)
    t.join()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/opentelemetry/dummy_client.py
examples/online_serving/opentelemetry/dummy_client.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Minimal OpenTelemetry demo client for a vLLM OpenAI-compatible server.

Opens a CLIENT span, injects the W3C trace context into the request headers,
and fires a single /v1/completions call so the server-side span appears as a
child of this one.
"""
import requests
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
from opentelemetry.trace import SpanKind, set_tracer_provider
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator

trace_provider = TracerProvider()
set_tracer_provider(trace_provider)

# Export spans both to an OTLP collector and to stdout for easy inspection.
for exporter in (OTLPSpanExporter(), ConsoleSpanExporter()):
    trace_provider.add_span_processor(BatchSpanProcessor(exporter))

tracer = trace_provider.get_tracer("dummy-client")

url = "http://localhost:8000/v1/completions"
with tracer.start_as_current_span("client-span", kind=SpanKind.CLIENT) as span:
    prompt = "San Francisco is a"
    span.set_attribute("prompt", prompt)

    # Propagate the current trace context via the standard traceparent header.
    headers = {}
    TraceContextTextMapPropagator().inject(headers)

    payload = {
        "model": "facebook/opt-125m",
        "prompt": prompt,
        "max_tokens": 10,
        "n": 3,
        "use_beam_search": "true",
        "temperature": 0.0,
        # "stream": True,
    }
    response = requests.post(url, headers=headers, json=payload)
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/disaggregated_serving/disagg_proxy_demo.py
examples/online_serving/disaggregated_serving/disagg_proxy_demo.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This file provides a disaggregated prefilling proxy demo to demonstrate an
example usage of XpYd disaggregated prefilling.
We can launch multiple vllm instances (2 for prefill and 2 for decode), and
launch this proxy demo through:
  python3 examples/online_serving/disaggregated_serving/disagg_proxy_demo.py  \
       --model $model_name  \
       --prefill localhost:8100 localhost:8101   \
       --decode localhost:8200 localhost:8201   \
       --port 8000

Note: This demo will be removed once the PDController implemented in PR 15343
(https://github.com/vllm-project/vllm/pull/15343) supports XpYd.
"""

import argparse
import ipaddress
import itertools
import json
import logging
import os
import sys
from abc import ABC, abstractmethod
from collections.abc import Callable

import aiohttp
import requests
import uvicorn
from fastapi import APIRouter, Depends, FastAPI, Header, HTTPException, Request, status
from fastapi.responses import JSONResponse, StreamingResponse

AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60)

logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)


class SchedulingPolicy(ABC):
    """Strategy interface: pick the next instance from a cycler."""

    @abstractmethod
    def schedule(self, cycler: itertools.cycle):
        raise NotImplementedError("Scheduling Proxy is not set.")


class Proxy:
    """Routes completions through a prefill instance, then a decode instance."""

    def __init__(
        self,
        prefill_instances: list[str],
        decode_instances: list[str],
        model: str,
        scheduling_policy: SchedulingPolicy,
        custom_create_completion: Callable[[Request], StreamingResponse] | None = None,
        custom_create_chat_completion: Callable[[Request], StreamingResponse]
        | None = None,
    ):
        self.prefill_instances = prefill_instances
        self.decode_instances = decode_instances
        self.prefill_cycler = itertools.cycle(prefill_instances)
        self.decode_cycler = itertools.cycle(decode_instances)
        self.model = model
        self.scheduling_policy = scheduling_policy
        self.custom_create_completion = custom_create_completion
        self.custom_create_chat_completion = custom_create_chat_completion
        self.router = APIRouter()
        self.setup_routes()

    def setup_routes(self):
        """Register the OpenAI-compatible routes plus admin endpoints."""
        self.router.post(
            "/v1/completions", dependencies=[Depends(self.validate_json_request)]
        )(
            self.custom_create_completion
            if self.custom_create_completion
            else self.create_completion
        )
        self.router.post(
            "/v1/chat/completions", dependencies=[Depends(self.validate_json_request)]
        )(
            self.custom_create_chat_completion
            if self.custom_create_chat_completion
            else self.create_chat_completion
        )
        self.router.get("/status", response_class=JSONResponse)(self.get_status)
        self.router.post(
            "/instances/add", dependencies=[Depends(self.api_key_authenticate)]
        )(self.add_instance_endpoint)

    async def validate_json_request(self, raw_request: Request):
        """Reject non-JSON requests with 415.

        Fix: use startswith() so values such as
        "application/json; charset=utf-8" are accepted as well.
        """
        content_type = raw_request.headers.get("content-type", "").lower()
        if not content_type.startswith("application/json"):
            raise HTTPException(
                status_code=415,
                detail="Unsupported Media Type: Only 'application/json' is allowed",
            )

    def api_key_authenticate(self, x_api_key: str = Header(...)):
        """Guard admin endpoints with the ADMIN_API_KEY environment variable."""
        expected_api_key = os.environ.get("ADMIN_API_KEY")
        if not expected_api_key:
            logger.error("ADMIN_API_KEY is not set in the environment.")
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Server configuration error.",
            )
        if x_api_key != expected_api_key:
            logger.warning("Unauthorized access attempt with API Key: %s", x_api_key)
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Forbidden: Invalid API Key.",
            )

    async def validate_instance(self, instance: str) -> bool:
        """Return True iff *instance* serves exactly self.model."""
        url = f"http://{instance}/v1/models"
        try:
            async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as client:
                logger.info("Verifying %s ...", instance)
                async with client.get(url) as response:
                    if response.status != 200:
                        return False
                    data = await response.json()
                    if "data" not in data or len(data["data"]) == 0:
                        return False
                    model_cur = data["data"][0].get("id", "")
                    if model_cur == self.model:
                        logger.info("Instance: %s could be added.", instance)
                        return True
                    logger.warning(
                        "Mismatch model %s : %s != %s",
                        instance,
                        model_cur,
                        self.model,
                    )
                    return False
        except aiohttp.ClientError as e:
            logger.error(str(e))
            return False
        except Exception as e:
            logger.error(str(e))
            return False

    async def add_instance_endpoint(self, request: Request):
        """Admin endpoint: validate and register a new prefill/decode node."""
        try:
            data = await request.json()
            logger.warning(str(data))
            instance_type = data.get("type")
            instance = data.get("instance")
            if instance_type not in ["prefill", "decode"]:
                raise HTTPException(status_code=400, detail="Invalid instance type.")
            if not instance or ":" not in instance:
                raise HTTPException(status_code=400, detail="Invalid instance format.")

            host, port_str = instance.split(":")
            try:
                if host != "localhost":
                    ipaddress.ip_address(host)
                port = int(port_str)
                if not (0 < port < 65536):
                    raise HTTPException(status_code=400, detail="Invalid port number.")
            except Exception as e:
                raise HTTPException(
                    status_code=400, detail="Invalid instance address."
                ) from e

            is_valid = await self.validate_instance(instance)
            if not is_valid:
                raise HTTPException(
                    status_code=400, detail="Instance validation failed."
                )

            if instance_type == "prefill":
                if instance not in self.prefill_instances:
                    self.prefill_instances.append(instance)
                    self.prefill_cycler = itertools.cycle(self.prefill_instances)
                else:
                    raise HTTPException(
                        status_code=400, detail="Instance already exists."
                    )
            else:
                if instance not in self.decode_instances:
                    self.decode_instances.append(instance)
                    self.decode_cycler = itertools.cycle(self.decode_instances)
                else:
                    raise HTTPException(
                        status_code=400, detail="Instance already exists."
                    )

            return JSONResponse(
                content={"message": f"Added {instance} to {instance_type}_instances."}
            )
        except HTTPException as http_exc:
            raise http_exc
        except Exception as e:
            logger.error("Error in add_instance_endpoint: %s", str(e))
            raise HTTPException(status_code=500, detail=str(e)) from e

    async def forward_request(self, url, data, use_chunked=True):
        """POST *data* to *url*, yielding the body; 4xx bodies pass through."""
        async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
            headers = {"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}"}
            try:
                async with session.post(
                    url=url, json=data, headers=headers
                ) as response:
                    # 4xx responses are forwarded verbatim so the client sees
                    # the upstream error payload.
                    if 200 <= response.status < 300 or 400 <= response.status < 500:
                        if use_chunked:
                            async for chunk_bytes in response.content.iter_chunked(
                                1024
                            ):
                                yield chunk_bytes
                        else:
                            content = await response.read()
                            yield content
                    else:
                        error_content = await response.text()
                        try:
                            error_content = json.loads(error_content)
                        except json.JSONDecodeError:
                            pass  # keep raw text when the body is not JSON
                        logger.error(
                            "Request failed with status %s: %s",
                            response.status,
                            error_content,
                        )
                        raise HTTPException(
                            status_code=response.status,
                            detail=f"Request failed with status {response.status}: "
                            f"{error_content}",
                        )
            except aiohttp.ClientError as e:
                logger.error("ClientError occurred: %s", str(e))
                raise HTTPException(
                    status_code=502,
                    detail="Bad Gateway: Error communicating with upstream server.",
                ) from e
            except Exception as e:
                logger.error("Unexpected error: %s", str(e))
                raise HTTPException(status_code=500, detail=str(e)) from e

    def schedule(self, cycler: itertools.cycle) -> str:
        return self.scheduling_policy.schedule(cycler)

    async def get_status(self):
        """Report the current prefill/decode node sets."""
        status = {
            "prefill_node_count": len(self.prefill_instances),
            "decode_node_count": len(self.decode_instances),
            "prefill_nodes": self.prefill_instances,
            "decode_nodes": self.decode_instances,
        }
        return status

    async def create_completion(self, raw_request: Request):
        """Handle /v1/completions: 1-token prefill, then stream decode."""
        try:
            request = await raw_request.json()

            kv_prepare_request = request.copy()
            kv_prepare_request["max_tokens"] = 1

            prefill_instance = self.schedule(self.prefill_cycler)
            try:
                async for _ in self.forward_request(
                    f"http://{prefill_instance}/v1/completions", kv_prepare_request
                ):
                    continue
            except HTTPException as http_exc:
                self.remove_instance_endpoint("prefill", prefill_instance)
                raise http_exc

            # Perform kv recv and decoding stage
            decode_instance = self.schedule(self.decode_cycler)
            try:
                generator = self.forward_request(
                    f"http://{decode_instance}/v1/completions", request
                )
            except HTTPException as http_exc:
                self.remove_instance_endpoint("decode", decode_instance)
                raise http_exc
            response = StreamingResponse(generator)
            return response
        except Exception:
            # sys is already imported at module level; no local import needed.
            exc_info = sys.exc_info()
            print("Error occurred in disagg proxy server")
            print(exc_info)

    async def create_chat_completion(self, raw_request: Request):
        """Handle /v1/chat/completions: 1-token prefill, then stream decode."""
        try:
            request = await raw_request.json()

            # add params to request
            kv_prepare_request = request.copy()
            kv_prepare_request["max_tokens"] = 1
            if "max_completion_tokens" in kv_prepare_request:
                kv_prepare_request["max_completion_tokens"] = 1

            # prefill stage
            prefill_instance = self.schedule(self.prefill_cycler)
            try:
                async for _ in self.forward_request(
                    f"http://{prefill_instance}/v1/chat/completions",
                    kv_prepare_request,
                ):
                    continue
            except HTTPException as http_exc:
                self.remove_instance_endpoint("prefill", prefill_instance)
                raise http_exc
            # Perform kv recv and decoding stage
            decode_instance = self.schedule(self.decode_cycler)
            try:
                generator = self.forward_request(
                    "http://" + decode_instance + "/v1/chat/completions", request
                )
            except HTTPException as http_exc:
                self.remove_instance_endpoint("decode", decode_instance)
                raise http_exc
            response = StreamingResponse(content=generator)
            return response
        except Exception:
            exc_info = sys.exc_info()
            error_messages = [str(e) for e in exc_info if e]
            print("Error occurred in disagg proxy server")
            print(error_messages)
            return StreamingResponse(
                content=iter(error_messages), media_type="text/event-stream"
            )

    def remove_instance_endpoint(self, instance_type, instance):
        """Drop a failing instance and rebuild the matching cycler.

        Fix: the prefill branch previously checked membership in (and rebuilt
        the cycler from) the *decode* lists, so dead prefill nodes were never
        removed and the prefill cycler could be filled with decode nodes.
        """
        if instance_type == "decode" and instance in self.decode_instances:
            self.decode_instances.remove(instance)
            self.decode_cycler = itertools.cycle(self.decode_instances)
        if instance_type == "prefill" and instance in self.prefill_instances:
            self.prefill_instances.remove(instance)
            self.prefill_cycler = itertools.cycle(self.prefill_instances)


class RoundRobinSchedulingPolicy(SchedulingPolicy):
    """Cycle through instances in registration order."""

    def schedule(self, cycler: itertools.cycle) -> str:
        return next(cycler)


class ProxyServer:
    """Validates CLI args, builds a Proxy, and serves it with uvicorn."""

    def __init__(
        self,
        args: argparse.Namespace,
        scheduling_policy: SchedulingPolicy | None = None,
        create_completion: Callable[[Request], StreamingResponse] | None = None,
        create_chat_completion: Callable[[Request], StreamingResponse] | None = None,
    ):
        self.validate_parsed_serve_args(args)
        self.port = args.port
        self.proxy_instance = Proxy(
            prefill_instances=[] if args.prefill is None else args.prefill,
            decode_instances=[] if args.decode is None else args.decode,
            model=args.model,
            scheduling_policy=(
                scheduling_policy
                if scheduling_policy is not None
                else RoundRobinSchedulingPolicy()
            ),
            custom_create_completion=create_completion,
            custom_create_chat_completion=create_chat_completion,
        )

    def validate_parsed_serve_args(self, args: argparse.Namespace):
        """Ensure both node lists are present, well-formed, and serve *model*."""
        if not args.prefill:
            raise ValueError("Please specify at least one prefill node.")
        if not args.decode:
            raise ValueError("Please specify at least one decode node.")
        self.validate_instances(args.prefill)
        self.validate_instances(args.decode)
        self.verify_model_config(args.prefill, args.model)
        self.verify_model_config(args.decode, args.model)

    def validate_instances(self, instances: list):
        """Check each instance is a valid host:port pair."""
        for instance in instances:
            if len(instance.split(":")) != 2:
                raise ValueError(f"Invalid instance format: {instance}")
            host, port = instance.split(":")
            try:
                if host != "localhost":
                    ipaddress.ip_address(host)
                port = int(port)
                if not (0 < port < 65536):
                    raise ValueError(f"Invalid port number in instance: {instance}")
            except Exception as e:
                raise ValueError(f"Invalid instance {instance}: {str(e)}") from e

    def verify_model_config(self, instances: list, model: str) -> None:
        """Confirm every instance reports the expected model (by suffix)."""
        model_suffix = model.split("/")[-1]
        for instance in instances:
            try:
                response = requests.get(f"http://{instance}/v1/models")
                if response.status_code == 200:
                    model_cur = response.json()["data"][0]["id"]
                    model_cur_suffix = model_cur.split("/")[-1]
                    if model_cur_suffix != model_suffix:
                        raise ValueError(
                            f"{instance} serves a different model: "
                            f"{model_cur} != {model}"
                        )
                else:
                    raise ValueError(f"Cannot get model id from {instance}!")
            except requests.RequestException as e:
                raise ValueError(
                    f"Error communicating with {instance}: {str(e)}"
                ) from e

    def run_server(self):
        app = FastAPI()
        app.include_router(self.proxy_instance.router)
        config = uvicorn.Config(app, port=self.port, loop="uvloop")
        server = uvicorn.Server(config)
        server.run()


def parse_args():
    # Todo: allow more config
    parser = argparse.ArgumentParser("vLLM disaggregated proxy server.")
    parser.add_argument("--model", "-m", type=str, required=True, help="Model name")

    parser.add_argument(
        "--prefill",
        "-p",
        type=str,
        nargs="+",
        help="List of prefill node URLs (host:port)",
    )

    parser.add_argument(
        "--decode",
        "-d",
        type=str,
        nargs="+",
        help="List of decode node URLs (host:port)",
    )

    parser.add_argument(
        "--port",
        type=int,
        default=8000,
        help="Server port number",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    proxy_server = ProxyServer(args=args)
    proxy_server.run_server()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/online_serving/disaggregated_encoder/disagg_epd_proxy.py
examples/online_serving/disaggregated_encoder/disagg_epd_proxy.py
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
disagg_encoder_proxy.py
Proxy that routes OpenAI-compatible "/v1/chat/completions" requests to two
clusters:

    - encode (multimodal feature extraction)
    - decode (language-model inference)

For MM input we:
  1. Extract *every* image/audio item.
  2. Fire N concurrent requests to the encoder cluster
     (one request per item, with **all text removed**).
  3. Wait for all of them to succeed.
  4. Forward the *original* request to a decode server.
"""

from __future__ import annotations

import argparse
import asyncio
import logging
import os
import random
import uuid
from collections.abc import AsyncIterator

import aiohttp
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse, StreamingResponse

###############################################################################
# FastAPI app & global state
###############################################################################
logging.basicConfig(
    level=logging.DEBUG, format="%(asctime)s %(levelname)s: %(message)s"
)
logger = logging.getLogger("proxy")

app = FastAPI()

encode_session: aiohttp.ClientSession | None = None
prefill_session: aiohttp.ClientSession | None = None
decode_session: aiohttp.ClientSession | None = None

###############################################################################
# Utils
###############################################################################
MM_TYPES = {"image_url", "audio_url", "input_audio"}


def extract_mm_items(request_data: dict) -> list[dict]:
    """
    Return *all* image/audio items that appear anywhere in `messages`.
    Each returned dict looks like:
        { "type": "image_url", "image_url": {...} }
    """
    items: list[dict] = []
    for msg in request_data.get("messages", []):
        content = msg.get("content")
        if not isinstance(content, list):
            continue
        for item in content:
            if item.get("type") in MM_TYPES:
                items.append(item)
    return items


async def fanout_encoder_primer(
    orig_request: dict,
    e_urls: list[str],
    req_id: str,
) -> None:
    """
    1. Build one request *per MM item* with all text removed.
    2. Send them concurrently to the encode cluster.
    3. Raise if any of them fails.
    """
    logger.info("[%s] Processing multimodal items...", req_id)
    mm_items = extract_mm_items(orig_request)
    if not mm_items:
        logger.info("[%s] No multimodal items, skipping encoder", req_id)
        return  # nothing to do
    logger.info("[%s] got %d multimodal items...", req_id, len(mm_items))

    tasks = []
    # Round-robin over encode servers to distribute load a bit
    url_cycle = (e_urls[i % len(e_urls)] for i in range(len(mm_items)))

    for idx, (item, target_url) in enumerate(zip(mm_items, url_cycle)):
        # Derive a *child* request id: <parent>:<index>:<random-short>
        child_req_id = f"{req_id}:{idx}:{uuid.uuid4().hex[:6]}"
        headers = {"x-request-id": child_req_id}
        encoder_req = {
            # You *may* need to keep additional fields
            "model": orig_request.get("model"),
            "messages": [
                {"role": "user", "content": [item]},
            ],
            # Only need 1 token so the server actually runs the encoder path
            "max_tokens": 1,
            "stream": False,
        }
        tasks.append(
            encode_session.post(
                f"{target_url}/v1/chat/completions",
                json=encoder_req,
                headers=headers,
            )
        )

    results = await asyncio.gather(*tasks, return_exceptions=True)

    # Fail fast if any sub-request failed
    for idx, r in enumerate(results):
        if isinstance(r, Exception):
            logger.error(
                "[%s] Encoder request #%d raised exception: %s",
                req_id,
                idx,
                r,
                exc_info=r,
            )
            raise HTTPException(
                status_code=502, detail=f"Encoder request failed: {str(r)}"
            )
        if r.status != 200:
            try:
                detail = await r.text()
            except Exception:
                detail = "<unable to read body>"
            logger.error(
                "[%s] Encoder request #%d returned status %s: %s",
                req_id,
                idx,
                r.status,
                detail,
            )
            raise HTTPException(
                status_code=r.status,
                detail=f"Encoder request failed: {detail}",
            )
        # Fix: release the connection back to the pool — the encoder body
        # itself is not needed, and unreleased responses leak connections.
        r.release()

    logger.info(
        "[%s] All %d encoder requests completed successfully", req_id, len(mm_items)
    )


async def maybe_prefill(
    req_data: dict,
    p_url: str,
    req_id: str,
) -> dict:
    """
    - Do prefill-only task if p_url exist;
    - Return modified request data with kv transfer params (for nixl connector)
    - Else, skip and return the original request data for decode
    """
    if p_url:
        logger.info("[%s] Processing through prefill: %s", req_id, p_url)
        prefill_response = await process_prefill_stage(req_data, p_url, req_id)
        # for nixl connector to facilitate kv transfer...
        prefill_response_json = await prefill_response.json()
        kv_transfer_params = prefill_response_json.get("kv_transfer_params", {})
        if kv_transfer_params:
            req_data["kv_transfer_params"] = kv_transfer_params
    # With no prefill cluster (or no kv params) the request passes through
    # unchanged.
    return req_data


async def process_prefill_stage(
    req_data: dict,
    p_url: str,
    req_id: str,
) -> aiohttp.ClientResponse:
    """Process request through Prefill stage and return the raw response.

    Fix: the return annotation previously said ``dict`` but the function
    returns the aiohttp response object (the caller awaits ``.json()``).
    """
    logger.info("[%s] Sending prefill request to: %s", req_id, p_url)
    prefill_request = req_data.copy()
    prefill_request["kv_transfer_params"] = {
        "do_remote_decode": True,
        "do_remote_prefill": False,
        "remote_engine_id": None,
        "remote_block_ids": None,
        "remote_host": None,
        "remote_port": None,
    }
    prefill_request["stream"] = False
    prefill_request["max_tokens"] = 1
    if "max_completion_tokens" in prefill_request:
        prefill_request["max_completion_tokens"] = 1
    if "stream_options" in prefill_request:
        del prefill_request["stream_options"]
    headers = {"x-request-id": req_id}

    try:
        prefill_response = await prefill_session.post(
            f"{p_url}/v1/chat/completions", json=prefill_request, headers=headers
        )
        prefill_response.raise_for_status()

        if prefill_response.status != 200:
            error_text = await prefill_response.text()
            logger.error(
                "[%s] Prefill request failed with status %d: %s",
                req_id,
                prefill_response.status,
                error_text,
            )
            raise HTTPException(
                status_code=prefill_response.status,
                detail={"error": "Prefill request failed", "message": error_text},
            )

        logger.info("[%s] Prefill request completed successfully", req_id)
        return prefill_response

    except Exception as e:
        logger.error("Prefill processing failed: %s", str(e))
        raise HTTPException(
            status_code=500,
            detail={"error": "Prefill processing error", "message": str(e)},
        ) from e


###############################################################################
# Middleware for request/response logging
###############################################################################
@app.middleware("http")
async def log_requests(request: Request, call_next):
    """Middleware to log all incoming requests and responses"""
    req_id = request.headers.get("x-request-id", str(uuid.uuid4()))

    # Log incoming request
    logger.info(
        ">>> [%s] %s %s from %s",
        req_id,
        request.method,
        request.url.path,
        request.client.host if request.client else "unknown",
    )

    try:
        # Process request
        response = await call_next(request)

        # Log response
        logger.info(
            "<<< [%s] %s %s completed with status %d",
            req_id,
            request.method,
            request.url.path,
            response.status_code,
        )
        return response
    except Exception as e:
        # Log errors
        logger.exception(
            "!!! [%s] %s %s failed with error: %s",
            req_id,
            request.method,
            request.url.path,
            str(e),
        )
        raise


###############################################################################
# FastAPI lifecycle
###############################################################################
@app.on_event("startup")
async def on_startup() -> None:
    """Create one pooled aiohttp session per cluster."""
    global encode_session, prefill_session, decode_session
    timeout = aiohttp.ClientTimeout(total=100_000)
    connector = aiohttp.TCPConnector(limit=0, force_close=False)
    encode_session = aiohttp.ClientSession(timeout=timeout, connector=connector)
    if app.state.p_urls:
        # only setup if prefill instance(s) exist
        prefill_session = aiohttp.ClientSession(timeout=timeout, connector=connector)
    decode_session = aiohttp.ClientSession(timeout=timeout, connector=connector)


@app.on_event("shutdown")
async def on_shutdown() -> None:
    global encode_session, prefill_session, decode_session
    if encode_session:
        await encode_session.close()
    if prefill_session:
        await prefill_session.close()
    if decode_session:
        await decode_session.close()


###############################################################################
# Core forwarding
###############################################################################
async def forward_non_stream(
    req_data: dict, req_id: str, e_urls: list[str], p_url: str, d_url: str
) -> dict:
    """E -> (P) -> D pipeline for non-streaming requests; returns decode JSON."""
    try:
        # Step 1: Process through Encoder instance (if has MM input)
        await fanout_encoder_primer(req_data, e_urls, req_id)

        # Step 2: Process through Prefill instance
        req_data = await maybe_prefill(req_data, p_url, req_id)

        # Step 3: Process through Decode instance
        logger.info("[%s] Forwarding to decode: %s", req_id, d_url)
        headers = {"x-request-id": req_id}

        # Non-streaming response
        async with decode_session.post(
            f"{d_url}/v1/chat/completions", json=req_data, headers=headers
        ) as resp:
            resp.raise_for_status()
            return await resp.json()
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("[%s] Error in forward_non_stream: %s", req_id, str(e))
        raise HTTPException(status_code=500, detail=f"Proxy error: {str(e)}") from e


async def forward_stream(
    req_data: dict, req_id: str, e_urls: list[str], p_url: str, d_url: str
) -> AsyncIterator[str]:
    """E -> (P) -> D pipeline for streaming requests; yields SSE chunks."""
    try:
        # Step 1: Process through Encoder instance (if has MM input)
        await fanout_encoder_primer(req_data, e_urls, req_id)

        # Step 2: Process through Prefill instance
        req_data = await maybe_prefill(req_data, p_url, req_id)

        # Step 3: Process through Decode instance
        logger.info("[%s] Starting streaming from decode: %s", req_id, d_url)
        headers = {"x-request-id": req_id}

        # Streaming response
        async with decode_session.post(
            f"{d_url}/v1/chat/completions",
            json=req_data,
            headers=headers,
        ) as resp:
            resp.raise_for_status()
            async for chunk in resp.content.iter_chunked(1024):
                if chunk:
                    yield chunk.decode("utf-8", errors="ignore")

        logger.info("[%s] Streaming completed", req_id)
    except HTTPException:
        logger.exception("[%s] HTTPException in forward_stream", req_id)
        raise
    except Exception as e:
        logger.exception("[%s] Error in forward_stream: %s", req_id, str(e))
        raise HTTPException(
            status_code=500, detail=f"Proxy streaming error: {str(e)}"
        ) from e


###############################################################################
# Public routes
###############################################################################
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    """Route one chat request through encode / prefill / decode clusters."""
    try:
        req_data = await request.json()
        req_id = request.headers.get("x-request-id", str(uuid.uuid4()))

        e_urls = app.state.e_urls  # we want the full list for fan-out
        p_url = random.choice(app.state.p_urls) if app.state.p_urls else None
        d_url = random.choice(app.state.d_urls)

        is_streaming = req_data.get("stream", False)

        if is_streaming:
            return StreamingResponse(
                forward_stream(req_data, req_id, e_urls, p_url, d_url),
                media_type="text/event-stream",
            )
        result = await forward_non_stream(req_data, req_id, e_urls, p_url, d_url)
        return JSONResponse(content=result)
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("Error in chat_completions endpoint: %s", str(e))
        raise HTTPException(
            status_code=500, detail=f"Request processing error: {str(e)}"
        ) from e


@app.get("/v1/models")
async def list_models():
    """Pass the model list straight through from the first decode server."""
    async with decode_session.get(f"{app.state.d_urls[0]}/v1/models") as resp:
        resp.raise_for_status()
        return await resp.json()


@app.get("/health")
async def health_check():
    """Aggregate /health across all three clusters; 503 if any is unhealthy."""

    async def healthy(urls):
        if not urls:
            return "empty"
        for u in urls:
            try:
                async with encode_session.get(f"{u}/health") as resp:
                    resp.raise_for_status()
            except Exception:
                return "unhealthy"
        return "healthy"

    e_status, p_status, d_status = await asyncio.gather(
        healthy(app.state.e_urls), healthy(app.state.p_urls), healthy(app.state.d_urls)
    )

    overall_healthy = all(
        status != "unhealthy" for status in (e_status, p_status, d_status)
    )
    status_code = 200 if overall_healthy else 503

    return JSONResponse(
        {
            "proxy": "healthy",
            "encode_cluster": e_status,
            "prefill_cluster": p_status,
            "decode_cluster": d_status,
        },
        status_code=status_code,
    )


###############################################################################
# Simple profiler fan-out (unchanged except for sessions)
###############################################################################
async def _post_if_available(
    session: aiohttp.ClientSession,
    url: str,
    payload: dict,
    headers: dict,
) -> dict | None:
    """
    POST `payload` to `url`.

    Returns
    -------
    - The decoded JSON body on success (2xx)
    - None if the endpoint does not exist (404)
    - Raises for anything else.
    """
    try:
        resp = await session.post(url, json=payload, headers=headers)
        if resp.status == 404:
            # profiling disabled on that server
            logger.warning("Profiling endpoint missing on %s", url)
            return None
        resp.raise_for_status()
        return await resp.json(content_type=None)
    except aiohttp.ClientResponseError as exc:
        # Pass 404 through the branch above, re-raise everything else
        if exc.status == 404:
            logger.warning("Profiling endpoint missing on %s", url)
            return None
        raise
    except Exception:
        # Network errors etc.: propagate
        raise


async def _profile_cmd(cmd: str, payload: dict, e_url: str, p_url: str, d_url: str):
    """
    Fire & forget to both clusters, tolerate 404.
    """
    headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY', '')}"}

    encode_task = _post_if_available(
        encode_session, f"{e_url}/{cmd}_profile", payload, headers
    )
    prefill_task = (
        _post_if_available(prefill_session, f"{p_url}/{cmd}_profile", payload, headers)
        if p_url is not None
        else asyncio.sleep(0)
    )
    decode_task = _post_if_available(
        decode_session, f"{d_url}/{cmd}_profile", payload, headers
    )
    encode_res, prefill_res, decode_res = await asyncio.gather(
        encode_task, prefill_task, decode_task
    )

    # If *all* clusters said "I don't have that route", surface an error
    if encode_res is prefill_res is decode_res is None:
        raise HTTPException(
            status_code=503,
            detail="Profiling endpoints are disabled on all clusters",
        )

    return {
        "encode": encode_res,  # may be None
        "prefill": prefill_res,  # may be None
        "decode": decode_res,  # may be None
    }


@app.post("/start_profile")
async def start_profile(request: Request):
    body = await request.json()
    # TODO: handle multi urls properly
    e_url = random.choice(app.state.e_urls)
    p_url = random.choice(app.state.p_urls) if app.state.p_urls else None
    d_url = random.choice(app.state.d_urls)
    return await _profile_cmd("start", body, e_url, p_url, d_url)


@app.post("/stop_profile")
async def stop_profile(request: Request):
    body = await request.json()
    # TODO: handle multi urls properly
    e_url = random.choice(app.state.e_urls)
    p_url = random.choice(app.state.p_urls) if app.state.p_urls else None
    d_url = random.choice(app.state.d_urls)
    return await _profile_cmd("stop", body, e_url, p_url, d_url)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", default="0.0.0.0")
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument(
        "--encode-servers-urls",
        required=True,
        help='Comma-separated encode URLs ("http://e1:8001,http://e2:8001")',
    )
    parser.add_argument(
        "--prefill-servers-urls",
        required=True,
        help=(
            'Comma-separated prefill URLs ("http://p1:8003,http://p2:8004") ',
            'to enable E->P->D, set "disable" or "none" to enable E->PD',
        ),
    )
    parser.add_argument(
        "--decode-servers-urls",
        required=True,
        help='Comma-separated decode URLs ("http://d1:8005,http://d2:8006")',
    )

    args = parser.parse_args()

    app.state.e_urls = [
        u.strip() for u in args.encode_servers_urls.split(",") if u.strip()
    ]
    app.state.d_urls = [
        u.strip() for u in args.decode_servers_urls.split(",") if u.strip()
    ]

    # handle prefill instances
    if args.prefill_servers_urls.lower() in ("disable", "none", ""):
        app.state.p_urls = []
        logger.info(
            "Disaggregated prefill phase explicitly disabled by user. Running E + PD..."
        )
    else:
        app.state.p_urls = [
            u.strip() for u in args.prefill_servers_urls.split(",") if u.strip()
        ]
        logger.info("Disaggregated prefill phase is enabled. Running E + P + D...")

    logger.info("Proxy listening on %s:%s", args.host, args.port)
    logger.info("Encode servers: %s", app.state.e_urls)
    logger.info("Prefill instances %s", app.state.p_urls)
    logger.info("Decode servers: %s", app.state.d_urls)

    uvicorn.run(
        app,
        host=args.host,
        port=args.port,
        log_level="info",
        loop="uvloop",
        access_log=True,
    )
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/others/tensorize_vllm_model.py
examples/others/tensorize_vllm_model.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import json import logging import os import uuid from vllm import LLM, SamplingParams from vllm.engine.arg_utils import EngineArgs from vllm.lora.request import LoRARequest from vllm.model_executor.model_loader.tensorizer import ( TensorizerArgs, TensorizerConfig, tensorize_lora_adapter, tensorize_vllm_model, tensorizer_kwargs_arg, ) from vllm.utils.argparse_utils import FlexibleArgumentParser logger = logging.getLogger() """ tensorize_vllm_model.py is a script that can be used to serialize and deserialize vLLM models. These models can be loaded using tensorizer to the GPU extremely quickly over an HTTP/HTTPS endpoint, an S3 endpoint, or locally. Tensor encryption and decryption is also supported, although libsodium must be installed to use it. Install vllm with tensorizer support using `pip install vllm[tensorizer]`. To learn more about tensorizer, visit https://github.com/coreweave/tensorizer To serialize a model, install vLLM from source, then run something like this from the root level of this repository: python examples/others/tensorize_vllm_model.py \ --model facebook/opt-125m \ serialize \ --serialized-directory s3://my-bucket \ --suffix v1 Which downloads the model from HuggingFace, loads it into vLLM, serializes it, and saves it to your S3 bucket. A local directory can also be used. This assumes your S3 credentials are specified as environment variables in the form of `S3_ACCESS_KEY_ID`, `S3_SECRET_ACCESS_KEY`, and `S3_ENDPOINT_URL`. To provide S3 credentials directly, you can provide `--s3-access-key-id` and `--s3-secret-access-key`, as well as `--s3-endpoint` as CLI args to this script. You can also encrypt the model weights with a randomly-generated key by providing a `--keyfile` argument. 
To deserialize a model, you can run something like this from the root level of this repository: python examples/others/tensorize_vllm_model.py \ --model EleutherAI/gpt-j-6B \ --dtype float16 \ deserialize \ --path-to-tensors s3://my-bucket/vllm/EleutherAI/gpt-j-6B/v1/model.tensors Which downloads the model tensors from your S3 bucket and deserializes them. You can also provide a `--keyfile` argument to decrypt the model weights if they were serialized with encryption. To support distributed tensor-parallel models, each model shard will be serialized to a separate file. The tensorizer_uri is then specified as a string template with a format specifier such as '%03d' that will be rendered with the shard's rank. Sharded models serialized with this script will be named as model-rank-%03d.tensors For more information on the available arguments for serializing, run `python -m examples.others.tensorize_vllm_model serialize --help`. Or for deserializing: `python examples/others/tensorize_vllm_model.py deserialize --help`. Once a model is serialized, tensorizer can be invoked with the `LLM` class directly to load models: ```python from vllm import LLM llm = LLM( "s3://my-bucket/vllm/facebook/opt-125m/v1", load_format="tensorizer", ) ``` A serialized model can be used during model loading for the vLLM OpenAI inference server: ``` vllm serve s3://my-bucket/vllm/facebook/opt-125m/v1 \ --load-format tensorizer ``` In order to see all of the available arguments usable to configure loading with tensorizer that are given to `TensorizerConfig`, run: `python examples/others/tensorize_vllm_model.py deserialize --help` under the `tensorizer options` section. These can also be used for deserialization in this example script, although `--tensorizer-uri` and `--path-to-tensors` are functionally the same in this case. Tensorizer can also be used to save and load LoRA adapters. 
A LoRA adapter can be serialized directly with the path to the LoRA adapter on HF Hub and a TensorizerConfig object. In this script, passing a HF id to a LoRA adapter will serialize the LoRA adapter artifacts to `--serialized-directory`. You can then use the LoRA adapter with `vllm serve`, for instance, by ensuring the LoRA artifacts are in your model artifacts directory and specifying `--enable-lora`. For instance: ``` vllm serve s3://my-bucket/vllm/facebook/opt-125m/v1 \ --load-format tensorizer \ --enable-lora ``` """ def get_parser(): parser = FlexibleArgumentParser( description="An example script that can be used to serialize and " "deserialize vLLM models. These models " "can be loaded using tensorizer directly to the GPU " "extremely quickly. Tensor encryption and decryption is " "also supported, although libsodium must be installed to " "use it." ) parser = EngineArgs.add_cli_args(parser) parser.add_argument( "--lora-path", type=str, required=False, help="Path to a LoRA adapter to " "serialize along with model tensors. This can then be deserialized " "along with the model by instantiating a TensorizerConfig object, " "creating a dict from it with TensorizerConfig.to_serializable(), " "and passing it to LoRARequest's initializer with the kwarg " "tensorizer_config_dict.", ) subparsers = parser.add_subparsers(dest="command", required=True) serialize_parser = subparsers.add_parser( "serialize", help="Serialize a model to `--serialized-directory`" ) serialize_parser.add_argument( "--suffix", type=str, required=False, help=( "The suffix to append to the serialized model directory, which is " "used to construct the location of the serialized model tensors, " "e.g. if `--serialized-directory` is `s3://my-bucket/` and " "`--suffix` is `v1`, the serialized model tensors will be " "saved to " "`s3://my-bucket/vllm/EleutherAI/gpt-j-6B/v1/model.tensors`. " "If none is provided, a random UUID will be used." 
), ) serialize_parser.add_argument( "--serialized-directory", type=str, required=True, help="The directory to serialize the model to. " "This can be a local directory or S3 URI. The path to where the " "tensors are saved is a combination of the supplied `dir` and model " "reference ID. For instance, if `dir` is the serialized directory, " "and the model HuggingFace ID is `EleutherAI/gpt-j-6B`, tensors will " "be saved to `dir/vllm/EleutherAI/gpt-j-6B/suffix/model.tensors`, " "where `suffix` is given by `--suffix` or a random UUID if not " "provided.", ) serialize_parser.add_argument( "--serialization-kwargs", type=tensorizer_kwargs_arg, required=False, help=( "A JSON string containing additional keyword arguments to " "pass to Tensorizer's TensorSerializer during " "serialization." ), ) serialize_parser.add_argument( "--keyfile", type=str, required=False, help=( "Encrypt the model weights with a randomly-generated binary key," " and save the key at this path" ), ) deserialize_parser = subparsers.add_parser( "deserialize", help=( "Deserialize a model from `--path-to-tensors`" " to verify it can be loaded and used." ), ) deserialize_parser.add_argument( "--path-to-tensors", type=str, required=False, help="The local path or S3 URI to the model tensors to deserialize. ", ) deserialize_parser.add_argument( "--serialized-directory", type=str, required=False, help="Directory with model artifacts for loading. Assumes a " "model.tensors file exists therein. Can supersede " "--path-to-tensors.", ) deserialize_parser.add_argument( "--keyfile", type=str, required=False, help=( "Path to a binary key to use to decrypt the model weights," " if the model was serialized with encryption" ), ) deserialize_parser.add_argument( "--deserialization-kwargs", type=tensorizer_kwargs_arg, required=False, help=( "A JSON string containing additional keyword arguments to " "pass to Tensorizer's `TensorDeserializer` during " "deserialization." 
), ) TensorizerArgs.add_cli_args(deserialize_parser) return parser def merge_extra_config_with_tensorizer_config(extra_cfg: dict, cfg: TensorizerConfig): for k, v in extra_cfg.items(): if hasattr(cfg, k): setattr(cfg, k, v) logger.info( "Updating TensorizerConfig with %s from " "--model-loader-extra-config provided", k, ) def deserialize(args, tensorizer_config): if args.lora_path: tensorizer_config.lora_dir = tensorizer_config.tensorizer_dir llm = LLM( model=args.model, load_format="tensorizer", tensor_parallel_size=args.tensor_parallel_size, model_loader_extra_config=tensorizer_config, enable_lora=True, ) sampling_params = SamplingParams( temperature=0, max_tokens=256, stop=["[/assistant]"] ) # Truncating this as the extra text isn't necessary prompts = ["[user] Write a SQL query to answer the question based on ..."] # Test LoRA load print( llm.generate( prompts, sampling_params, lora_request=LoRARequest( "sql-lora", 1, args.lora_path, tensorizer_config_dict=tensorizer_config.to_serializable(), ), ) ) else: llm = LLM( model=args.model, load_format="tensorizer", tensor_parallel_size=args.tensor_parallel_size, model_loader_extra_config=tensorizer_config, ) return llm def main(): parser = get_parser() args = parser.parse_args() s3_access_key_id = getattr(args, "s3_access_key_id", None) or os.environ.get( "S3_ACCESS_KEY_ID", None ) s3_secret_access_key = getattr( args, "s3_secret_access_key", None ) or os.environ.get("S3_SECRET_ACCESS_KEY", None) s3_endpoint = getattr(args, "s3_endpoint", None) or os.environ.get( "S3_ENDPOINT_URL", None ) credentials = { "s3_access_key_id": s3_access_key_id, "s3_secret_access_key": s3_secret_access_key, "s3_endpoint": s3_endpoint, } model_ref = args.model if args.command == "serialize" or args.command == "deserialize": keyfile = args.keyfile else: keyfile = None extra_config = {} if args.model_loader_extra_config: extra_config = json.loads(args.model_loader_extra_config) tensorizer_dir = args.serialized_directory or 
extra_config.get("tensorizer_dir") tensorizer_uri = getattr(args, "path_to_tensors", None) or extra_config.get( "tensorizer_uri" ) if tensorizer_dir and tensorizer_uri: parser.error( "--serialized-directory and --path-to-tensors cannot both be provided" ) if not tensorizer_dir and not tensorizer_uri: parser.error( "Either --serialized-directory or --path-to-tensors must be provided" ) if args.command == "serialize": engine_args = EngineArgs.from_cli_args(args) input_dir = tensorizer_dir.rstrip("/") suffix = args.suffix if args.suffix else uuid.uuid4().hex base_path = f"{input_dir}/vllm/{model_ref}/{suffix}" if engine_args.tensor_parallel_size > 1: model_path = f"{base_path}/model-rank-%03d.tensors" else: model_path = f"{base_path}/model.tensors" tensorizer_config = TensorizerConfig( tensorizer_uri=model_path, encryption_keyfile=keyfile, serialization_kwargs=args.serialization_kwargs or {}, **credentials, ) if args.lora_path: tensorizer_config.lora_dir = tensorizer_config.tensorizer_dir tensorize_lora_adapter(args.lora_path, tensorizer_config) merge_extra_config_with_tensorizer_config(extra_config, tensorizer_config) tensorize_vllm_model(engine_args, tensorizer_config) elif args.command == "deserialize": tensorizer_config = TensorizerConfig( tensorizer_uri=args.path_to_tensors, tensorizer_dir=args.serialized_directory, encryption_keyfile=keyfile, deserialization_kwargs=args.deserialization_kwargs or {}, **credentials, ) merge_extra_config_with_tensorizer_config(extra_config, tensorizer_config) deserialize(args, tensorizer_config) else: raise ValueError("Either serialize or deserialize must be specified.") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/others/lmcache/kv_cache_sharing_lmcache_v1.py
examples/others/lmcache/kv_cache_sharing_lmcache_v1.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file demonstrates the example usage of remote KV cache sharing with LMCache. We will launch 2 vllm instances, and launch an additional LMCache server. KV cache is transferred in the following manner: (1) vLLM instance 1 -> LMCache server (KV cache store). (2) LMCache server -> vLLM instance 2 (KV cache reuse/retrieve). Note that lmcache needs to be installed to run this example. Learn more about LMCache in https://github.com/LMCache/LMCache. """ import os import subprocess import time from multiprocessing import Event, Process from lmcache.integration.vllm.utils import ENGINE_NAME from lmcache.v1.cache_engine import LMCacheEngineBuilder from vllm import LLM, SamplingParams from vllm.config import KVTransferConfig # LMCache-related environment variables # The port to start LMCache server port = 8100 # Use experimental features in LMCache os.environ["LMCACHE_USE_EXPERIMENTAL"] = "True" # LMCache is set to use 256 tokens per chunk os.environ["LMCACHE_CHUNK_SIZE"] = "256" # Disable local CPU backend in LMCache os.environ["LMCACHE_LOCAL_CPU"] = "False" # Set local CPU memory buffer limit to 5.0 GB os.environ["LMCACHE_MAX_LOCAL_CPU_SIZE"] = "5.0" # Set the remote URL for LMCache server os.environ["LMCACHE_REMOTE_URL"] = f"lm://localhost:{port}" # Set the serializer/deserializer between vllm and LMCache server # `naive` indicates using raw bytes of the tensor without any compression os.environ["LMCACHE_REMOTE_SERDE"] = "naive" prompts = [ "Hello, how are you?" * 1000, ] def run_store(store_done, prompts): # We use GPU 0 for KV cache store process. os.environ["CUDA_VISIBLE_DEVICES"] = "0" sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10) ktc = KVTransferConfig(kv_connector="LMCacheConnectorV1", kv_role="kv_both") # Set GPU memory utilization to 0.8 for an A40 GPU with 40GB # memory. Reduce the value if your GPU has less memory. 
llm = LLM( model="mistralai/Mistral-7B-Instruct-v0.2", kv_transfer_config=ktc, max_model_len=8000, gpu_memory_utilization=0.8, enforce_eager=True, ) outputs = llm.generate(prompts, sampling_params) for output in outputs: generated_text = output.outputs[0].text print(f"Generated text: {generated_text!r}") print("KV cache store is finished.") store_done.set() # Clean up lmcache backend LMCacheEngineBuilder.destroy(ENGINE_NAME) def run_retrieve(store_done, prompts, timeout=1): # We use GPU 1 for KV cache retrieve process. os.environ["CUDA_VISIBLE_DEVICES"] = "1" sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10) ktc = KVTransferConfig(kv_connector="LMCacheConnectorV1", kv_role="kv_both") # Set GPU memory utilization to 0.8 for an A40 GPU with 40GB # of memory. Reduce the value if your GPU has less memory. llm = LLM( model="mistralai/Mistral-7B-Instruct-v0.2", kv_transfer_config=ktc, max_model_len=8000, gpu_memory_utilization=0.8, enforce_eager=True, ) print("Waiting for KV cache store to finish...") store_done.wait() time.sleep(timeout) outputs = llm.generate(prompts, sampling_params) for output in outputs: generated_text = output.outputs[0].text print(f"Generated text: {generated_text!r}") # Clean up lmcache backend LMCacheEngineBuilder.destroy(ENGINE_NAME) def run_lmcache_server(port): server_proc = subprocess.Popen( ["python", "-m", "lmcache.v1.server", "localhost", str(port)] ) return server_proc def main(): store_done = Event() store_process = Process(target=run_store, args=(store_done, prompts)) retrieve_process = Process(target=run_retrieve, args=(store_done, prompts)) lmcache_server_process = run_lmcache_server(port) # Start KV cache store process store_process.start() # Start KV cache retrieve process retrieve_process.start() # Clean up the processes store_process.join() retrieve_process.terminate() lmcache_server_process.terminate() lmcache_server_process.wait() if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false
vllm-project/vllm
https://github.com/vllm-project/vllm/blob/0d4044edd85de30d7d4558aeea4d1e95c7c556d6/examples/others/lmcache/cpu_offload_lmcache.py
examples/others/lmcache/cpu_offload_lmcache.py
# SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """ This file demonstrates the example usage of cpu offloading with LMCache in vLLM v1 or v0. Usage: Specify vLLM version -v v0 : Use LMCacheConnector model = mistralai/Mistral-7B-Instruct-v0.2 (Includes enable_chunked_prefill = True) -v v1 : Use LMCacheConnectorV1 (default) model = meta-llama/Meta-Llama-3.1-8B-Instruct (Without enable_chunked_prefill) Note that `lmcache` is needed to run this example. Requirements: https://docs.lmcache.ai/getting_started/installation.html#prerequisites Learn more about LMCache environment setup, please refer to: https://docs.lmcache.ai/getting_started/installation.html """ import argparse import contextlib import os import time from dataclasses import asdict from lmcache.integration.vllm.utils import ENGINE_NAME from lmcache.v1.cache_engine import LMCacheEngineBuilder from vllm import LLM, SamplingParams from vllm.config import KVTransferConfig from vllm.engine.arg_utils import EngineArgs def setup_environment_variables(): # LMCache-related environment variables # Use experimental features in LMCache os.environ["LMCACHE_USE_EXPERIMENTAL"] = "True" # LMCache is set to use 256 tokens per chunk os.environ["LMCACHE_CHUNK_SIZE"] = "256" # Enable local CPU backend in LMCache os.environ["LMCACHE_LOCAL_CPU"] = "True" # Set local CPU memory limit to 5.0 GB os.environ["LMCACHE_MAX_LOCAL_CPU_SIZE"] = "5.0" @contextlib.contextmanager def build_llm_with_lmcache(lmcache_connector: str, model: str): ktc = KVTransferConfig( kv_connector=lmcache_connector, kv_role="kv_both", ) # Set GPU memory utilization to 0.8 for an A40 GPU with 40GB # memory. Reduce the value if your GPU has less memory. # Note: LMCache supports chunked prefill (see vLLM#14505, LMCache#392). 
llm_args = EngineArgs( model=model, kv_transfer_config=ktc, max_model_len=8000, gpu_memory_utilization=0.8, ) llm = LLM(**asdict(llm_args)) try: yield llm finally: # Clean up lmcache backend LMCacheEngineBuilder.destroy(ENGINE_NAME) def print_output( llm: LLM, prompt: list[str], sampling_params: SamplingParams, req_str: str, ): # Should be able to see logs like the following: # `LMCache INFO: Storing KV cache for 6006 out of 6006 tokens for request 0` # This indicates that the KV cache has been stored in LMCache. start = time.time() outputs = llm.generate(prompt, sampling_params) print("-" * 50) for output in outputs: generated_text = output.outputs[0].text print(f"Generated text: {generated_text!r}") print(f"Generation took {time.time() - start:.2f} seconds, {req_str} request done.") print("-" * 50) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "-v", "--version", choices=["v0", "v1"], default="v1", help="Specify vLLM version (default: v1)", ) return parser.parse_args() def main(): lmcache_connector = "LMCacheConnectorV1" model = "meta-llama/Meta-Llama-3.1-8B-Instruct" setup_environment_variables() with build_llm_with_lmcache(lmcache_connector, model) as llm: # This example script runs two requests with a shared prefix. # Define the shared prompt and specific prompts shared_prompt = "Hello, how are you?" * 1000 first_prompt = [ shared_prompt + "Hello, my name is", ] second_prompt = [ shared_prompt + "Tell me a very long story", ] sampling_params = SamplingParams(temperature=0, top_p=0.95, max_tokens=10) # Print the first output print_output(llm, first_prompt, sampling_params, "first") time.sleep(1) # print the second output print_output(llm, second_prompt, sampling_params, "second") if __name__ == "__main__": main()
python
Apache-2.0
0d4044edd85de30d7d4558aeea4d1e95c7c556d6
2026-01-04T14:38:19.902011Z
false