# tts/traverse_bs_util_std.py
# Source: Hugging Face upload (Hamerlate — "Upload folder using huggingface_hub", commit 34779f9 verified)
import os
import time
import statistics
from typing import List, Tuple, Dict
import torch
import torch.cuda.nvtx as nvtx
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer
# ========= Force the vLLM V1 engine =========
# NOTE(review): these are set *after* `from vllm import LLM` above — assumes
# vLLM reads these env vars lazily at engine construction; confirm for the
# pinned vLLM version.
os.environ.setdefault("VLLM_USE_V1", "1")
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")
# Optional: enable V1 metrics/stats logging
os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")
# ========= Try importing V1 metric types (compatibility across vLLM versions) =========
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
except Exception:
    # Fallback for versions without vllm.v1.metrics.reader: bind all four
    # names to one dummy class so isinstance() checks below are harmless
    # no-ops (the class-name string checks still work).
    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy
# ========= Configuration =========
MODEL_NAME = "Qwen/Qwen2-1.5B"
DTYPE = "bfloat16"
TP = 1  # tensor-parallel size
GPU_MEM_UTIL = 0.90
TRUST_REMOTE_CODE = True
# Scenarios: prefill = input tokens, decode = output tokens
SCENARIOS = [
    # {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
    # {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
    {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
]
BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
SEED = 1234
TEMPERATURE = 0.0  # greedy decoding
TOP_P = 1.0
WARMUP_PER_BS = 1  # one warm-up pass per batch size (currently unused: the warm-up code in main() is commented out)
# ========= Build a prompt with an exact token count =========
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Return a prompt string that tokenizes to exactly ``target_len`` tokens.

    Binary-searches the character-prefix length of a long filler text for the
    desired token count, then truncates or pads at the token level if the
    search does not land exactly. The final assert verifies the count after a
    decode/encode round-trip, so a tokenizer with an unstable round-trip will
    raise AssertionError.

    NOTE(review): assumes target_len >= 1 on the general path; target_len <= 0
    would fail the final assert.
    """
    if target_len <= 1:
        # Minimal prompt: use one simple token (avoids an empty string, which
        # would tokenize to zero tokens).
        ids = tokenizer("A", add_special_tokens=False)["input_ids"]
        if len(ids) >= 1:
            return tokenizer.decode(ids[:1], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    base_text = (
        "You are a helpful assistant. "
        "Please analyze the following input and respond succinctly. "
    )
    chunk = " ".join(["data"] * 100) + ". "
    text = base_text + chunk * 200  # sufficiently long source text
    # Binary search over the character prefix length; token count is
    # (effectively) monotone in prefix length for BPE-style tokenizers.
    lo, hi = 0, len(text)
    target_ids = None
    while lo <= hi:
        mid = (lo + hi) // 2
        ids = tokenizer(text[:mid], add_special_tokens=False)["input_ids"]
        if len(ids) == target_len:
            target_ids = ids
            break
        if len(ids) < target_len:
            lo = mid + 1
        else:
            hi = mid - 1
    if target_ids is None:
        # No exact hit: take the prefix at the final search bound, then
        # truncate or grow at the token level.
        ids = tokenizer(text[:lo], add_special_tokens=False)["input_ids"]
        if len(ids) > target_len:
            target_ids = ids[:target_len]
        else:
            filler = " data"
            while len(ids) < target_len:
                ids = tokenizer(tokenizer.decode(ids) + filler, add_special_tokens=False)["input_ids"]
            target_ids = ids[:target_len]
    prompt = tokenizer.decode(target_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    # Assert the exact token length after the decode/encode round-trip.
    assert len(tokenizer(prompt, add_special_tokens=False)["input_ids"]) == target_len
    return prompt
# ========= V1 metrics extraction helpers =========
# Prometheus-style histogram names exposed by vLLM V1 via LLM.get_metrics().
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency
def _iter_children_of_vector(vec_obj):
for attr in ("children", "metrics", "series", "values", "samples", "items"):
if hasattr(vec_obj, attr):
val = getattr(vec_obj, attr)
if isinstance(val, dict):
for v in val.values():
yield v
else:
try:
for v in val:
yield v
except TypeError:
pass
def _collect_hist_sum_count(metrics, metric_name: str):
    """Aggregate (sum, count) over every histogram named ``metric_name``.

    Handles both plain Histogram metrics and Vector metrics whose children
    are Histograms. Type checks fall back to the class-name string because
    the real metric classes may be unavailable (dummy fallback import).
    """
    def _is_hist(obj):
        # isinstance fails against the dummy fallback class, so also
        # match on the class name.
        return isinstance(obj, Histogram) or obj.__class__.__name__ == "Histogram"

    agg_sum = 0.0
    agg_count = 0.0
    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue
        if _is_hist(metric):
            # Plain Histogram metric.
            agg_sum += float(getattr(metric, "sum", 0.0))
            agg_count += float(getattr(metric, "count", 0.0))
        elif isinstance(metric, Vector) or metric.__class__.__name__ == "Vector":
            # Vector[Histogram]: accumulate over each labelled child.
            for child in _iter_children_of_vector(metric):
                if _is_hist(child):
                    agg_sum += float(getattr(child, "sum", 0.0))
                    agg_count += float(getattr(child, "count", 0.0))
    return agg_sum, agg_count
def _metrics_snapshot(llm) -> Dict[str, float]:
    """Capture the current cumulative TTFT/TPOT histogram sums and counts.

    Returns an all-zero snapshot when LLM.get_metrics() is unavailable
    (older vLLM or stats disabled) so callers can still diff snapshots.
    """
    try:
        metric_list = llm.get_metrics()  # V1: list of Metric objects (Histogram/Vector/...)
    except Exception:
        return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
    snapshot: Dict[str, float] = {}
    snapshot["ttft_sum"], snapshot["ttft_cnt"] = _collect_hist_sum_count(metric_list, TTFT_METRIC_NAME)
    snapshot["tpot_sum"], snapshot["tpot_cnt"] = _collect_hist_sum_count(metric_list, TPOT_METRIC_NAME)
    return snapshot
def _metrics_delta(before: dict, after: dict):
return {
"ttft_sum": after["ttft_sum"] - before["ttft_sum"],
"ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
"tpot_sum": after["tpot_sum"] - before["tpot_sum"],
"tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
}
# ========= generate wrapper (NVTX ranges are pushed at the call site) =========
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
    """Thin pass-through to ``llm.generate`` kept as a profiling hook.

    NOTE(review): despite the original section header, no NVTX range is
    pushed here — main() wraps this call with nvtx.range_push/range_pop.
    """
    return llm.generate(prompts, params)
# ========= 统计格式化 =========
def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
xs = [v for v in x if (v == v)] # 过滤 NaN
if not xs:
return (float("nan"), float("nan"), float("nan"))
return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1]) # p90
def main():
    """Run the batch-size sweep: for each scenario and batch size, time one
    ``generate`` call under an NVTX range and report wall-clock throughput
    plus TTFT / decode throughput derived from the vLLM V1 metrics delta."""
    print("--- vLLM V1 基准测试(含 NVTX 标记)---")
    print(f"模型: {MODEL_NAME}")
    print(f"批量大小: {BATCH_SIZES}")
    print(f"场景: {[s['name'] for s in SCENARIOS]}")
    print("-" * 60)
    if not torch.cuda.is_available():
        print("错误:需要 CUDA GPU。")
        return
    print("加载分词器/模型中...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)
    # Mark the model-load phase with an NVTX range
    nvtx.range_push("LLM_init")
    llm = LLM(
        model=MODEL_NAME,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=TRUST_REMOTE_CODE,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=1024,  # large enough to cover the whole sweep (max bs = 1024)
        max_model_len=4096,
        disable_log_stats=False,  # keep V1 metrics collection enabled
    )
    nvtx.range_pop()
    print("模型加载完成。")
    for sc in SCENARIOS:
        name = sc["name"]
        prompt_tokens = sc["prompt_tokens"]
        max_new_tokens = sc["max_new_tokens"]
        print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")
        # Build a prompt of exactly `prompt_tokens` tokens
        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)
        # Sampling parameters (greedy: TEMPERATURE is 0.0)
        sampling_params = SamplingParams(
            max_tokens=max_new_tokens,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            seed=SEED,
            n=1,
        )
        # Per-batch-size results (printed for later parsing)
        for bs in BATCH_SIZES:
            print(f"\n--- 批量大小 bs={bs} ---")
            prompts = [prompt_text] * bs
            # Warm-up (currently disabled; WARMUP_PER_BS is unused)
            # print("预热中...")
            # nvtx.range_push(f"WARMUP [{name}] bs={bs}")
            # _ = decorated_generate(llm, [prompts[0]], sampling_params)
            # torch.cuda.synchronize()
            # nvtx.range_pop()
            # Timed run, bracketed by V1 metrics snapshots
            # nvtx.range_push(f"RUN [{name}] bs={bs}")
            torch.cuda.synchronize()
            snap_before = _metrics_snapshot(llm)
            t0 = time.perf_counter()
            nvtx.range_push(f"generate [{name}] bs={bs}")
            outputs = decorated_generate(llm, prompts, sampling_params)
            nvtx.range_pop()  # generate
            torch.cuda.synchronize()
            t1 = time.perf_counter()
            snap_after = _metrics_snapshot(llm)
            # nvtx.range_pop()  # RUN
            duration = t1 - t0
            # Token counts and wall-clock throughput
            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
            throughput = total_output_tokens / duration if duration > 0 else float("inf")
            # Derive TTFT / decode throughput from the V1 metrics delta;
            # NaN when the counters did not move (metrics unavailable).
            delta = _metrics_delta(snap_before, snap_after)
            if delta["ttft_cnt"] > 0:
                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            else:
                ttft = float("nan")
            if delta["tpot_cnt"] > 0:
                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds/token
                decode_tps = 1.0 / avg_tpot
            else:
                decode_tps = float("nan")
            print(f"执行时间: {duration:.4f} s")
            print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
            print(f"生成总 tokens: {total_output_tokens}")
            print(f"吞吐(生成tokens/秒): {throughput:.2f}")
            print(f"TTFT (V1 metrics): {ttft:.4f} s")
            print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")
    print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
if __name__ == "__main__":
    # Show which GPUs are visible before any CUDA work starts.
    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
    main()