| | import os |
| | import time |
| | import statistics |
| | from typing import List, Tuple, Dict |
| |
|
| | import torch |
| | import torch.cuda.nvtx as nvtx |
| |
|
| | from vllm import LLM, SamplingParams |
| | from transformers import AutoTokenizer |
| |
|
| | |
# Default engine/runtime knobs; setdefault keeps any values the caller
# already exported in the shell.
# NOTE(review): `vllm` is imported above before these are set — this only
# works if vLLM reads these environment variables lazily (at engine
# construction). Confirm for the vLLM version in use.
os.environ.setdefault("VLLM_USE_V1", "1")
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")

# vLLM V1 metrics reader types. On versions where this module is absent,
# fall back to a dummy class so the isinstance checks below simply never
# match (note: all four names alias the SAME dummy class in the fallback;
# the collection code also compares __class__.__name__ as a backup).
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector
except Exception:
    Counter = Gauge = Histogram = Vector = type("X", (), {})
| |
|
| | |
# --- Benchmark configuration -------------------------------------------
MODEL_NAME = "Qwen/Qwen2-1.5B"
DTYPE = "bfloat16"
TP = 1                       # tensor-parallel degree
GPU_MEM_UTIL = 0.90          # fraction of GPU memory vLLM may reserve
TRUST_REMOTE_CODE = True

# Two extreme scenarios: prefill-heavy (long prompt, 1 new token) and
# decode-heavy (1-token prompt, many new tokens).
SCENARIOS = [
    {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
    {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
]

BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256]

SEED = 1234
TEMPERATURE = 0.0            # greedy decoding for reproducible runs
TOP_P = 1.0
WARMUP_PER_BS = 1            # warmup generate() calls per batch size
| |
|
| | |
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Build a prompt whose tokenization has exactly ``target_len`` tokens.

    A long filler text is generated, then a binary search over its character
    prefix length looks for a prefix that encodes (without special tokens)
    to exactly ``target_len`` ids; the decoded prefix is returned. If no
    exact hit is found, the result is trimmed or padded to the target.

    Args:
        tokenizer: HF-style tokenizer — callable returning a mapping with
            ``input_ids`` and providing ``decode``.
        target_len: desired prompt length in tokens (>= 1).

    Returns:
        A prompt string that round-trips to exactly ``target_len`` tokens.

    Raises:
        ValueError: if the decoded prompt does not re-encode to
            ``target_len`` tokens (tokenizer round-trip failure).
    """
    if target_len <= 1:
        # Single-token fast path: encode a trivial string and keep one token.
        ids = tokenizer("A", add_special_tokens=False)["input_ids"]
        if len(ids) >= 1:
            return tokenizer.decode(ids[:1], skip_special_tokens=True, clean_up_tokenization_spaces=False)

    # Filler text long enough to cover any reasonable target length.
    base_text = (
        "You are a helpful assistant. "
        "Please analyze the following input and respond succinctly. "
    )
    chunk = " ".join(["data"] * 100) + ". "
    text = base_text + chunk * 200

    # Binary-search the character prefix length whose encoding hits the
    # target token count exactly (token count is assumed non-decreasing in
    # prefix length, which holds for typical subword tokenizers).
    lo, hi = 0, len(text)
    target_ids = None
    while lo <= hi:
        mid = (lo + hi) // 2
        ids = tokenizer(text[:mid], add_special_tokens=False)["input_ids"]
        if len(ids) == target_len:
            target_ids = ids
            break
        if len(ids) < target_len:
            lo = mid + 1
        else:
            hi = mid - 1

    if target_ids is None:
        # No exact hit: start from the final search position, then trim
        # down or pad up with filler until the count is reached.
        ids = tokenizer(text[:lo], add_special_tokens=False)["input_ids"]
        if len(ids) > target_len:
            target_ids = ids[:target_len]
        else:
            filler = " data"
            while len(ids) < target_len:
                ids = tokenizer(tokenizer.decode(ids) + filler, add_special_tokens=False)["input_ids"]
            target_ids = ids[:target_len]

    prompt = tokenizer.decode(target_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    # Validate the decode/encode round trip explicitly: an `assert` would be
    # silently stripped under `python -O`, letting a wrong-length prompt
    # corrupt the benchmark.
    actual_len = len(tokenizer(prompt, add_special_tokens=False)["input_ids"])
    if actual_len != target_len:
        raise ValueError(
            f"prompt round-trip produced {actual_len} tokens, expected {target_len}"
        )
    return prompt
| |
|
| | |
# Prometheus-style histogram names exposed by the vLLM V1 metrics reader.
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"
| |
|
| | def _iter_children_of_vector(vec_obj): |
| | for attr in ("children", "metrics", "series", "values", "samples", "items"): |
| | if hasattr(vec_obj, attr): |
| | val = getattr(vec_obj, attr) |
| | if isinstance(val, dict): |
| | for v in val.values(): |
| | yield v |
| | else: |
| | try: |
| | for v in val: |
| | yield v |
| | except TypeError: |
| | pass |
| |
|
def _collect_hist_sum_count(metrics, metric_name: str):
    """Aggregate ``(sum, count)`` across every histogram named ``metric_name``.

    Handles both top-level Histogram entries and histograms nested inside
    Vector containers. The ``__class__.__name__`` string comparison is kept
    as a fallback for environments where the reader types could not be
    imported (the module-level fallback binds them to a dummy class).
    """

    def _is_kind(obj, cls, kind_name):
        # isinstance first, class-name string match as the fallback.
        return isinstance(obj, cls) or obj.__class__.__name__ == kind_name

    total_sum, total_count = 0.0, 0.0
    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue
        if _is_kind(metric, Histogram, "Histogram"):
            total_sum += float(getattr(metric, "sum", 0.0))
            total_count += float(getattr(metric, "count", 0.0))
        elif _is_kind(metric, Vector, "Vector"):
            for child in _iter_children_of_vector(metric):
                if _is_kind(child, Histogram, "Histogram"):
                    total_sum += float(getattr(child, "sum", 0.0))
                    total_count += float(getattr(child, "count", 0.0))
    return total_sum, total_count
| |
|
def _metrics_snapshot(llm) -> Dict[str, float]:
    """Read cumulative TTFT/TPOT histogram totals from the engine.

    Returns a dict with keys ``ttft_sum``/``ttft_cnt``/``tpot_sum``/
    ``tpot_cnt``. Best-effort: an all-zero snapshot is returned when the
    engine does not expose usable metrics (never raises).
    """
    try:
        mets = llm.get_metrics()
    except Exception:
        # Metrics API unavailable on this engine/version — report zeros.
        return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
    ttft = _collect_hist_sum_count(mets, TTFT_METRIC_NAME)
    tpot = _collect_hist_sum_count(mets, TPOT_METRIC_NAME)
    return {
        "ttft_sum": ttft[0],
        "ttft_cnt": ttft[1],
        "tpot_sum": tpot[0],
        "tpot_cnt": tpot[1],
    }
| |
|
| | def _metrics_delta(before: dict, after: dict): |
| | return { |
| | "ttft_sum": after["ttft_sum"] - before["ttft_sum"], |
| | "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"], |
| | "tpot_sum": after["tpot_sum"] - before["tpot_sum"], |
| | "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"], |
| | } |
| |
|
| | |
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
    # NOTE(review): plain pass-through to ``llm.generate``; the name suggests
    # it exists as a single symbol to wrap/decorate (e.g. with NVTX or a
    # profiler hook) — confirm intent before inlining it.
    return llm.generate(prompts, params)
| |
|
| | |
def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
    """Summarize a list of floats as ``(mean, median, ~p90)``.

    NaN entries are dropped first (``v == v`` is False only for NaN).

    Returns:
        ``(mean, median, p90)`` where p90 is ``statistics.quantiles(n=10)[-1]``;
        a NaN triple for empty / all-NaN input; a degenerate triple of the
        single value when only one sample remains — ``statistics.quantiles``
        needs at least two data points and would otherwise raise
        ``StatisticsError``.
    """
    xs = [v for v in x if (v == v)]
    if not xs:
        return (float("nan"), float("nan"), float("nan"))
    if len(xs) == 1:
        # Singleton: every summary statistic collapses to the value itself.
        return (xs[0], xs[0], xs[0])
    return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1])
| |
|
def main():
    """Run the vLLM V1 prefill/decode benchmark matrix with NVTX markers.

    For every scenario in SCENARIOS and every batch size in BATCH_SIZES:
    build a prompt with an exact token count, warm up, then time one
    batched ``generate()`` call, bracketing each phase with NVTX ranges so
    the run can be navigated in Nsight Systems. TTFT and TPOT are derived
    from deltas of the engine's cumulative V1 histograms around the timed
    call.

    Fixes vs. the original: the warmup loop now honors WARMUP_PER_BS
    (previously one hard-coded call), and a zero average TPOT no longer
    raises ZeroDivisionError.
    """
    print("--- vLLM V1 基准测试(含 NVTX 标记)---")
    print(f"模型: {MODEL_NAME}")
    print(f"批量大小: {BATCH_SIZES}")
    print(f"场景: {[s['name'] for s in SCENARIOS]}")
    print("-" * 60)

    if not torch.cuda.is_available():
        print("错误:需要 CUDA GPU。")
        return

    print("加载分词器/模型中...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)

    # Engine construction is expensive; mark it with its own NVTX range so
    # it is easy to exclude from kernel-level analysis.
    nvtx.range_push("LLM_init")
    llm = LLM(
        model=MODEL_NAME,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=TRUST_REMOTE_CODE,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=256,
        max_model_len=8192,
        disable_log_stats=False,  # keep stats on so the metrics reader has data
    )
    nvtx.range_pop()
    print("模型加载完成。")

    for sc in SCENARIOS:
        name = sc["name"]
        prompt_tokens = sc["prompt_tokens"]
        max_new_tokens = sc["max_new_tokens"]

        print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")

        # One exact-length prompt per scenario, replicated across the batch.
        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)

        # Deterministic greedy sampling so runs are comparable.
        sampling_params = SamplingParams(
            max_tokens=max_new_tokens,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            seed=SEED,
            n=1,
        )

        for bs in BATCH_SIZES:
            print(f"\n--- 批量大小 bs={bs} ---")

            prompts = [prompt_text] * bs

            # Warmup before the timed run (was a single hard-coded call;
            # now honors WARMUP_PER_BS for consistency with the config).
            print("预热中...")
            nvtx.range_push(f"WARMUP [{name}] bs={bs}")
            for _ in range(WARMUP_PER_BS):
                _ = decorated_generate(llm, [prompts[0]], sampling_params)
            torch.cuda.synchronize()
            nvtx.range_pop()

            # Timed region: synchronize before/after so perf_counter
            # brackets all GPU work; snapshot engine metrics on both sides
            # to compute per-run deltas.
            nvtx.range_push(f"RUN [{name}] bs={bs}")
            torch.cuda.synchronize()
            snap_before = _metrics_snapshot(llm)
            t0 = time.perf_counter()

            nvtx.range_push(f"generate [{name}] bs={bs}")
            outputs = decorated_generate(llm, prompts, sampling_params)
            nvtx.range_pop()

            torch.cuda.synchronize()
            t1 = time.perf_counter()
            snap_after = _metrics_snapshot(llm)
            nvtx.range_pop()

            duration = t1 - t0

            # Wall-clock throughput over generated tokens only.
            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
            throughput = total_output_tokens / duration if duration > 0 else float("inf")

            # TTFT / TPOT from the engine-side histogram deltas; NaN when
            # no samples were recorded during this run.
            delta = _metrics_delta(snap_before, snap_after)
            if delta["ttft_cnt"] > 0:
                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            else:
                ttft = float("nan")

            if delta["tpot_cnt"] > 0:
                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]
                # Guard: a zero histogram sum with a nonzero count would
                # otherwise raise ZeroDivisionError.
                decode_tps = 1.0 / avg_tpot if avg_tpot > 0 else float("inf")
            else:
                decode_tps = float("nan")

            print(f"执行时间: {duration:.4f} s")
            print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
            print(f"生成总 tokens: {total_output_tokens}")
            print(f"吞吐(生成tokens/秒): {throughput:.2f}")
            print(f"TTFT (V1 metrics): {ttft:.4f} s")
            print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")

    print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
| |
|
if __name__ == "__main__":
    # Echo GPU visibility before running so logs record which devices the
    # process could see.
    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
    main()
| |
|