Upload folder using huggingface_hub
- .gitattributes +2 -0
- traverse_bs_util_sim_decoding.log +390 -0
- traverse_bs_util_sim_decoding.nsys-rep +3 -0
- traverse_bs_util_sim_decoding.py +274 -0
- traverse_bs_util_sim_prefill.log +382 -0
- traverse_bs_util_sim_prefill.nsys-rep +3 -0
- traverse_bs_util_sim_prefill.py +262 -0
.gitattributes CHANGED
@@ -65,3 +65,5 @@ qwen_util_bs.nsys-rep filter=lfs diff=lfs merge=lfs -text
 traverse_bs.nsys-rep filter=lfs diff=lfs merge=lfs -text
 sim_traverse_bs/traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
 traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
+traverse_bs_util_sim_decoding.nsys-rep filter=lfs diff=lfs merge=lfs -text
+traverse_bs_util_sim_prefill.nsys-rep filter=lfs diff=lfs merge=lfs -text
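(For context: rules of this form are what `git lfs track "<pattern>"` appends to .gitattributes; each pattern tagged `filter=lfs diff=lfs merge=lfs -text` routes matching files through Git LFS, which is why the two new .nsys-rep reports in this commit are stored as LFS pointers rather than raw binaries.)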
traverse_bs_util_sim_decoding.log ADDED
@@ -0,0 +1,390 @@
+WARNING: CPU IP/backtrace sampling not supported, disabling.
+Try the 'nsys status --environment' command to learn more.
+
+WARNING: CPU context switch tracing not supported, disabling.
+Try the 'nsys status --environment' command to learn more.
+
+INFO 08-13 19:21:37 [__init__.py:235] Automatically detected platform cuda.
+CUDA_VISIBLE_DEVICES = 3
+--- vLLM V1 benchmark (with NVTX markers) ---
+Model: Qwen/Qwen2-1.5B
+Batch sizes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+Scenarios: ['prefill1_decode512']
+------------------------------------------------------------
+Loading tokenizer/model...
+INFO 08-13 19:21:46 [config.py:1604] Using max model len 4096
+INFO 08-13 19:21:47 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
+INFO 08-13 19:21:52 [__init__.py:235] Automatically detected platform cuda.
+INFO 08-13 19:21:54 [core.py:572] Waiting for init message from front-end.
+INFO 08-13 19:21:54 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
+INFO 08-13 19:21:56 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
+WARNING 08-13 19:21:56 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+INFO 08-13 19:21:56 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
+INFO 08-13 19:21:56 [gpu_model_runner.py:1875] Loading model from scratch...
+INFO 08-13 19:21:56 [cuda.py:290] Using Flash Attention backend on V1 engine.
+INFO 08-13 19:21:57 [weight_utils.py:296] Using model weights format ['*.safetensors']
+INFO 08-13 19:21:57 [weight_utils.py:349] No model.safetensors.index.json found in remote.
+
+
+
+
+INFO 08-13 19:21:58 [default_loader.py:262] Loading weights took 0.63 seconds
+INFO 08-13 19:21:58 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 1.878581 seconds
+INFO 08-13 19:22:04 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
+INFO 08-13 19:22:04 [backends.py:541] Dynamo bytecode transform time: 5.72 s
+INFO 08-13 19:22:09 [backends.py:161] Directly load the compiled graph(s) for dynamic shape from the cache, took 4.036 s
+INFO 08-13 19:22:10 [monitor.py:34] torch.compile takes 5.72 s in total
+INFO 08-13 19:22:11 [gpu_worker.py:255] Available KV cache memory: 12.81 GiB
+INFO 08-13 19:22:11 [kv_cache_utils.py:833] GPU KV cache size: 479,536 tokens
+INFO 08-13 19:22:11 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.07x
+
+INFO 08-13 19:22:13 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
+INFO 08-13 19:22:13 [core.py:193] init engine (profile, create kv cache, warmup model) took 14.62 seconds
+Model loaded.
+
+===== Scenario: prefill1_decode512 | prefill=1, decode=512 =====
+
+--- Batch size bs=1 ---
+
+
+Execution time: 3.3194 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 512
+Throughput (generated tokens/s): 154.24
+TTFT (V1 metrics): 0.0190 s
+Decode throughput (V1 metrics): 155.07 tok/s
+
+--- Batch size bs=2 ---
+
+
+Execution time: 3.6484 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 1024
+Throughput (generated tokens/s): 280.67
+TTFT (V1 metrics): 0.0120 s
+Decode throughput (V1 metrics): 140.73 tok/s
+
+--- Batch size bs=4 ---
+
+
+Execution time: 3.6361 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 2048
+Throughput (generated tokens/s): 563.24
+TTFT (V1 metrics): 0.0140 s
+Decode throughput (V1 metrics): 141.25 tok/s
+
+--- Batch size bs=8 ---
+
+
+Execution time: 3.7267 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 4096
+Throughput (generated tokens/s): 1099.08
+TTFT (V1 metrics): 0.0149 s
+Decode throughput (V1 metrics): 137.87 tok/s
+
+--- Batch size bs=16 ---
+
+
+Execution time: 3.8260 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 8192
+Throughput (generated tokens/s): 2141.13
+TTFT (V1 metrics): 0.0136 s
+Decode throughput (V1 metrics): 134.33 tok/s
+
+--- Batch size bs=32 ---
+
+
+Execution time: 3.9972 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 16384
+Throughput (generated tokens/s): 4098.85
+TTFT (V1 metrics): 0.0164 s
+Decode throughput (V1 metrics): 128.92 tok/s
+
+--- Batch size bs=64 ---
+
+
+Execution time: 4.2731 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 32768
+Throughput (generated tokens/s): 7668.51
+TTFT (V1 metrics): 0.0198 s
+Decode throughput (V1 metrics): 120.83 tok/s
+
+--- Batch size bs=128 ---
+
+
+Execution time: 4.8421 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 65536
+Throughput (generated tokens/s): 13534.75
+TTFT (V1 metrics): 0.0316 s
+Decode throughput (V1 metrics): 107.35 tok/s
+
+--- Batch size bs=256 ---
+
+
+Execution time: 7.4408 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 131072
+Throughput (generated tokens/s): 17615.41
+TTFT (V1 metrics): 0.0433 s
+Decode throughput (V1 metrics): 69.70 tok/s
+
+--- Batch size bs=512 ---
+
+
+Execution time: 12.6794 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 262144
+Throughput (generated tokens/s): 20674.72
+TTFT (V1 metrics): 0.1809 s
+Decode throughput (V1 metrics): 42.38 tok/s
+
+--- Batch size bs=1024 ---
+
+
+[rank0]:[W813 19:23:32.135663883 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
+Execution time: 25.2865 s
+Actual average input tokens: 1.00 (target 1)
+Total generated tokens: 524288
+Throughput (generated tokens/s): 20733.89
+TTFT (V1 metrics): 0.1191 s
+Decode throughput (V1 metrics): 20.77 tok/s
+
+Done. Tip: the NVTX ranges make it easy to locate each scenario/batch-size call in Nsight Systems.
+GPU 3: General Metrics for NVIDIA AD10x (any frequency)
+Generating '/tmp/nsys-report-bd68.qdstrm'
+
+
+[3/8] Executing 'nvtx_sum' stats report
+
+Time (%)  Total Time (ns)  Instances  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Style  Range
+--------  ---------------  ---------  ----------------  ----------------  --------------  --------------  -----------  -------  --------------------------------------
+31.5  35,202,819,763  1  35,202,819,763.0  35,202,819,763.0  35,202,819,763  35,202,819,763  0.0  PushPop  :LLM_init
+22.6  25,285,906,551  1  25,285,906,551.0  25,285,906,551.0  25,285,906,551  25,285,906,551  0.0  PushPop  :generate [prefill1_decode512] bs=1024
+11.3  12,679,311,252  1  12,679,311,252.0  12,679,311,252.0  12,679,311,252  12,679,311,252  0.0  PushPop  :generate [prefill1_decode512] bs=512
+6.7  7,440,608,085  1  7,440,608,085.0  7,440,608,085.0  7,440,608,085  7,440,608,085  0.0  PushPop  :generate [prefill1_decode512] bs=256
+4.3  4,841,914,697  1  4,841,914,697.0  4,841,914,697.0  4,841,914,697  4,841,914,697  0.0  PushPop  :generate [prefill1_decode512] bs=128
+3.8  4,272,889,441  1  4,272,889,441.0  4,272,889,441.0  4,272,889,441  4,272,889,441  0.0  PushPop  :generate [prefill1_decode512] bs=64
+3.6  3,997,075,015  1  3,997,075,015.0  3,997,075,015.0  3,997,075,015  3,997,075,015  0.0  PushPop  :generate [prefill1_decode512] bs=32
+3.4  3,825,710,172  1  3,825,710,172.0  3,825,710,172.0  3,825,710,172  3,825,710,172  0.0  PushPop  :generate [prefill1_decode512] bs=16
+3.3  3,726,603,655  1  3,726,603,655.0  3,726,603,655.0  3,726,603,655  3,726,603,655  0.0  PushPop  :generate [prefill1_decode512] bs=8
+3.3  3,648,294,896  1  3,648,294,896.0  3,648,294,896.0  3,648,294,896  3,648,294,896  0.0  PushPop  :generate [prefill1_decode512] bs=2
+3.2  3,635,960,724  1  3,635,960,724.0  3,635,960,724.0  3,635,960,724  3,635,960,724  0.0  PushPop  :generate [prefill1_decode512] bs=4
+3.0  3,319,210,677  1  3,319,210,677.0  3,319,210,677.0  3,319,210,677  3,319,210,677  0.0  PushPop  :generate [prefill1_decode512] bs=1
+0.0  90,630  2  45,315.0  45,315.0  41,468  49,162  5,440.5  PushPop  CCCL:cub::DeviceSegmentedRadixSort
+
+[4/8] Executing 'osrt_sum' stats report
+
+Time (%)  Total Time (ns)  Num Calls  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Name
+--------  -----------------  ---------  ---------------  ----------------  ---------  --------------  ----------------  ----------------------
+29.7  1,284,023,267,442  49,709  25,830,800.6  28,820.0  1,000  96,534,138,823  969,651,617.8  pthread_cond_timedwait
+24.2  1,045,773,118,476  73,135  14,299,215.4  10,062,843.0  1,010  81,708,518,315  463,992,514.8  epoll_wait
+23.9  1,031,963,983,489  549  1,879,715,816.9  15,827.0  1,644  96,535,575,076  13,212,126,179.1  pthread_cond_wait
+8.3  357,289,068,553  57  6,268,229,272.9  10,000,073,611.0  10,419  10,000,146,360  4,730,020,985.8  sem_timedwait
+8.2  355,562,427,280  39,343  9,037,501.6  1,512.0  1,000  12,219,368,064  127,255,975.8  poll
+3.0  131,715,516,186  11,467  11,486,484.4  7,170,376.0  28,456  585,263,318  14,022,810.8  sem_wait
+2.6  112,179,081,690  41,286  2,717,121.6  2,213.0  1,000  94,667,308,194  468,888,772.5  read
+0.0  793,423,700  330  2,404,314.2  1,354,549.5  1,900  18,406,079  2,639,127.9  pthread_rwlock_wrlock
+0.0  494,035,258  199,029  2,482.2  1,380.0  1,000  72,123,224  161,811.1  munmap
+0.0  298,099,876  8,608  34,630.6  10,089.5  1,002  29,694,619  390,579.6  ioctl
+0.0  220,903,685  369  598,655.0  2,510.0  1,159  22,536,588  3,264,985.3  fopen
+0.0  121,576,605  24  5,065,691.9  5,064,718.5  5,053,737  5,087,479  7,589.0  nanosleep
+0.0  110,429,690  30,645  3,603.5  2,536.0  1,000  19,587,041  111,890.3  open64
+0.0  88,325,670  79  1,118,046.5  3,103.0  1,011  81,521,546  9,166,201.0  waitpid
+0.0  76,471,088  18,154  4,212.4  3,660.0  1,000  1,659,563  15,417.3  mmap64
+0.0  74,586,704  96  776,944.8  3,874.0  1,020  19,635,272  3,727,062.1  open
+0.0  72,521,171  8,897  8,151.2  4,707.0  1,022  2,826,985  34,660.7  recv
+0.0  71,841,066  8,895  8,076.6  5,429.0  1,571  84,986  7,239.5  send
+0.0  69,801,955  41,067  1,699.7  1,627.0  1,000  32,060  794.7  pthread_cond_signal
+0.0  67,085,379  39  1,720,137.9  470,979.0  3,544  10,373,042  3,340,509.2  pthread_join
+0.0  56,617,564  10  5,661,756.4  18,705.5  8,315  56,388,994  17,823,747.2  connect
+0.0  51,207,160  14,809  3,457.8  2,380.0  1,013  139,725  5,733.7  write
+0.0  40,211,592  4,773  8,424.8  6,319.0  1,000  661,710  13,119.1  pthread_mutex_lock
+0.0  16,225,859  10,123  1,602.9  1,387.0  1,000  17,416  730.6  epoll_ctl
+0.0  9,852,733  147  67,025.4  68,737.0  55,805  95,256  5,155.2  sleep
+0.0  7,858,705  22  357,213.9  474,706.5  8,796  678,261  278,233.7  pthread_rwlock_rdlock
+0.0  7,721,440  131  58,942.3  56,260.0  21,296  195,560  26,034.2  pthread_create
+0.0  7,224,126  929  7,776.2  3,096.0  1,000  86,864  11,142.9  fgets
+0.0  1,723,609  344  5,010.5  4,755.0  1,827  40,649  2,516.1  fopen64
+0.0  1,708,972  62  27,564.1  2,983.5  1,002  230,421  59,773.0  futex
+0.0  1,347,355  1,069  1,260.4  1,023.0  1,000  12,904  880.3  fclose
+0.0  1,149,466  196  5,864.6  3,579.5  1,105  168,420  13,582.2  mmap
+0.0  878,967  1  878,967.0  878,967.0  878,967  878,967  0.0  fork
+0.0  364,215  65  5,603.3  5,028.0  1,909  15,104  3,123.9  pipe2
+0.0  247,833  41  6,044.7  4,941.0  1,709  17,457  4,172.6  socket
+0.0  188,362  19  9,913.8  3,097.0  1,045  62,742  16,639.4  bind
+0.0  128,433  34  3,777.4  3,261.0  1,187  14,840  2,461.4  pthread_cond_broadcast
+0.0  76,747  7  10,963.9  9,959.0  3,576  31,262  9,493.1  fread
+0.0  65,399  41  1,595.1  1,200.0  1,012  5,988  1,063.6  fcntl
+0.0  49,079  5  9,815.8  9,542.0  4,750  17,158  4,761.0  accept4
+0.0  42,725  25  1,709.0  1,806.0  1,011  2,296  397.2  sigaction
+0.0  40,441  20  2,022.1  2,166.5  1,063  3,618  818.8  dup2
+0.0  39,878  15  2,658.5  2,065.0  1,267  7,040  1,459.6  stat
+0.0  31,245  12  2,603.8  1,918.0  1,006  5,220  1,771.4  fflush
+0.0  27,179  5  5,435.8  5,277.0  1,662  9,374  3,035.3  fwrite
+0.0  21,540  4  5,385.0  5,545.5  4,572  5,877  575.6  lstat
+0.0  17,255  4  4,313.8  4,516.5  2,856  5,366  1,051.1  flock
+0.0  16,827  9  1,869.7  1,599.0  1,008  3,313  844.3  pread
+0.0  15,569  10  1,556.9  1,444.0  1,184  2,260  325.7  listen
+0.0  13,074  3  4,358.0  4,294.0  4,285  4,495  118.7  fputs_unlocked
+0.0  12,439  5  2,487.8  2,713.0  1,831  3,023  566.6  mprotect
+0.0  7,489  4  1,872.3  1,856.5  1,636  2,140  206.8  flockfile
+0.0  6,919  1  6,919.0  6,919.0  6,919  6,919  0.0  kill
+0.0  5,460  2  2,730.0  2,730.0  2,008  3,452  1,021.1  openat64
+0.0  5,297  3  1,765.7  1,842.0  1,157  2,298  574.3  fstat
+0.0  3,627  1  3,627.0  3,627.0  3,627  3,627  0.0  fputs
+
+[5/8] Executing 'cuda_api_sum' stats report
+
+Time (%)  Total Time (ns)  Num Calls  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Name
+--------  ---------------  ---------  -----------  -----------  --------  -----------  -----------  ------------------------------------------
+65.2  20,318,348,211  12,196  1,665,984.6  3,806.0  1,713  143,905,820  4,877,424.6  cudaStreamSynchronize
+19.6  6,114,479,110  979,090  6,245.1  4,893.0  826  61,317,707  103,504.7  cudaLaunchKernel
+5.9  1,844,232,020  151,960  12,136.3  9,991.0  7,437  6,397,280  55,443.5  cudaGraphLaunch_v10000
+4.4  1,366,270,456  61,593  22,182.2  8,605.0  2,898  97,438,741  427,651.1  cudaMemcpyAsync
+2.1  657,913,070  123,014  5,348.3  4,791.0  646  11,218,430  74,172.4  cuLaunchKernel
+0.7  225,191,175  1,943  115,898.7  75,223.0  40,921  1,507,774  191,247.4  cudaGraphInstantiateWithFlags_v11040
+0.6  190,028,321  2,135  89,006.2  32,930.0  5,778  121,430,749  2,627,383.4  cudaDeviceSynchronize
+0.4  131,733,072  24,728  5,327.3  5,346.0  183  7,263,692  48,804.8  cudaMemsetAsync
+0.4  117,166,497  154,261  759.5  737.0  297  9,955  164.6  cudaStreamIsCapturing_v10000
+0.2  54,817,946  222  246,927.7  125,544.5  64,964  2,389,846  359,442.8  cudaFree
+0.1  41,574,143  348  119,465.9  111,957.5  6,496  1,314,648  70,124.0  cudaMalloc
+0.1  25,470,407  10  2,547,040.7  2,568,202.0  60,182  4,674,895  1,473,736.1  cuLibraryLoadData
+0.0  14,126,639  13,502  1,046.3  512.0  267  4,070,645  36,594.4  cuKernelGetFunction
+0.0  11,511,739  169  68,116.8  73,800.0  26,538  398,968  40,288.3  cuModuleLoadData
+0.0  9,477,345  18,895  501.6  477.0  305  7,151  120.4  cudaStreamGetCaptureInfo_v2_v11030
+0.0  8,547,159  1,943  4,398.9  4,349.0  3,274  12,306  644.1  cudaStreamBeginCapture_v10000
+0.0  7,583,507  1,943  3,903.0  3,886.0  2,371  10,115  530.0  cudaGraphDestroy_v10000
+0.0  2,953,354  128  23,073.1  2,127.0  1,339  976,651  118,496.6  cudaStreamCreateWithPriority
+0.0  2,583,759  1,943  1,329.8  1,322.0  973  2,362  129.3  cudaStreamEndCapture_v10000
+0.0  1,910,887  26  73,495.7  12,773.5  3,625  1,207,162  232,915.8  cudaHostAlloc
+0.0  1,625,828  1,943  836.8  771.0  625  3,016  254.6  cudaGraphGetNodes_v10000
+0.0  943,862  310  3,044.7  2,639.0  879  11,991  1,944.4  cudaEventQuery
+0.0  731,374  311  2,351.7  2,439.0  991  7,657  1,133.3  cudaEventRecord
+0.0  219,541  8  27,442.6  26,305.5  8,804  64,233  18,995.2  cudaMemGetInfo
+0.0  140,500  810  173.5  143.0  85  1,704  110.6  cuGetProcAddress_v2
+0.0  21,914  21  1,043.5  438.0  339  4,729  1,202.9  cudaEventCreateWithFlags
+0.0  16,258  16  1,016.1  849.5  502  2,663  551.3  cuLibraryGetKernel
+0.0  8,991  14  642.2  586.0  346  1,420  261.0  cudaThreadExchangeStreamCaptureMode_v10010
+0.0  4,849  3  1,616.3  1,664.0  1,386  1,799  210.6  cuInit
+0.0  3,460  4  865.0  749.0  110  1,852  882.7  cuModuleGetLoadingMode
+0.0  3,416  1  3,416.0  3,416.0  3,416  3,416  0.0  cudaStreamWaitEvent
+0.0  1,901  1  1,901.0  1,901.0  1,901  1,901  0.0  cudaEventDestroy
+0.0  1,166  2  583.0  583.0  248  918  473.8  cudaGetDriverEntryPoint_v11030
+
+[6/8] Executing 'cuda_gpu_kern_sum' stats report
+
+Time (%)  Total Time (ns)  Instances  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Name
+--------  ---------------  ---------  -----------  -----------  ---------  ---------  -----------  ----------------------------------------------------------------------------------------------------
+33.5  9,507,682,829  84,588  112,399.9  58,880.0  5,728  569,477  137,306.6  void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
+27.2  7,733,720,607  29,164  265,180.4  333,123.0  33,344  763,622  112,810.5  ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
+7.4  2,089,759,636  1,164  1,795,326.1  1,390,859.0  40,065  4,518,698  1,024,759.1  ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
+3.3  942,739,585  5,754  163,840.7  13,376.0  1,951  1,008,234  293,922.8  void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
+3.3  942,069,183  76,664  12,288.3  8,032.0  6,240  73,248  8,482.6  void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
+3.3  926,915,977  5,958  155,575.0  73,120.5  7,649  549,540  194,687.6  void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
+2.8  781,701,830  1,991  392,617.7  496,547.0  10,528  506,724  194,713.8  void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
+2.5  718,309,736  5,756  124,793.2  9,920.0  5,151  716,420  213,447.3  void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
+2.4  679,170,209  292,768  2,319.8  1,889.0  1,631  6,304  962.1  void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
+2.1  605,586,081  14,252  42,491.3  42,529.0  26,240  62,817  1,642.0  ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
+1.8  516,743,427  13,776  37,510.4  37,472.0  36,608  42,560  332.8  ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
+1.3  366,625,152  16,268  22,536.6  23,936.0  1,055  462,659  19,521.4  triton_poi_fused_mul_silu_1
+1.2  345,171,402  112  3,081,887.5  3,078,316.5  3,036,028  3,128,477  29,283.9  void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
+0.9  260,532,523  513  507,860.7  507,843.0  506,403  509,475  427.2  void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x1_tn_align8>(T1::Par…
+0.9  255,622,286  604  423,215.7  487,970.0  7,008  488,866  160,519.3  std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
+0.9  242,493,430  161,056  1,505.6  1,280.0  1,023  3,488  458.1  void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
+0.7  206,425,050  146,384  1,410.2  1,344.0  1,183  2,208  221.6  void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
+0.7  203,499,133  184  1,105,973.5  579,541.0  369,795  2,808,909  981,858.0  ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
+0.7  187,786,954  1,120  167,666.9  158,929.0  40,416  1,415,463  206,084.0  ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
+0.6  180,432,004  16,268  11,091.2  11,936.0  1,505  111,617  4,905.8  triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
+0.5  135,678,243  43,792  3,098.2  3,104.0  2,943  3,616  73.3  void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
+0.3  98,055,325  32,872  2,982.9  2,945.0  2,847  3,233  98.3  void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
+0.3  83,656,872  16,268  5,142.4  5,472.0  1,536  79,136  3,233.8  triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
+0.2  52,052,585  15,687  3,318.2  3,457.0  1,344  22,048  905.0  triton_poi_fused_cat_3
+0.1  39,252,777  8  4,906,597.1  4,863,769.0  4,802,745  5,085,370  117,043.6  void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
+0.1  35,101,287  15,687  2,237.6  2,336.0  863  16,672  687.2  triton_poi_fused_view_5
+0.1  26,987,031  17,256  1,563.9  1,408.0  1,023  2,784  465.1  void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
+0.1  22,634,043  784  28,870.0  12,543.5  11,616  62,720  20,595.1  ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
+0.1  22,584,985  15,687  1,439.7  1,440.0  1,215  6,720  194.4  triton_poi_fused_cat_4
+0.1  20,606,081  5,888  3,499.7  3,136.0  2,687  7,488  970.6  void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
+0.1  20,451,721  4  5,112,930.3  5,112,153.5  4,937,754  5,289,660  201,308.2  void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
+0.1  15,066,944  5,752  2,619.4  2,368.0  1,952  3,969  554.4  void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
+0.1  14,293,381  28  510,477.9  512,002.5  468,706  513,474  8,208.5  void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
+0.0  9,733,874  4  2,433,468.5  2,435,244.5  2,367,692  2,495,693  60,628.3  void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
+0.0  9,136,049  28  326,287.5  326,210.0  324,514  329,538  982.3  ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
+0.0  8,425,678  224  37,614.6  37,568.5  36,608  38,880  409.5  void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
+0.0  7,777,256  2  3,888,628.0  3,888,628.0  3,705,715  4,071,541  258,678.0  void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
+0.0  7,758,429  8,970  864.9  864.0  767  1,280  77.7  void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
+0.0  7,519,688  5,754  1,306.9  1,152.0  1,023  2,048  279.6  void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
+0.0  7,231,396  336  21,522.0  21,440.0  21,056  22,592  362.7  ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
+0.0  6,130,365  476  12,878.9  12,800.5  11,744  14,432  645.4  ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
+0.0  5,896,061  4  1,474,015.3  1,473,463.0  1,473,191  1,475,944  1,292.3  void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
+0.0  5,380,367  5,752  935.4  896.0  863  1,344  76.3  void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
+0.0  4,603,692  5,292  869.9  864.0  767  1,185  33.2  void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
+0.0  3,996,949  2  1,998,474.5  1,998,474.5  1,996,490  2,000,459  2,806.5  void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
+0.0  3,842,156  4,143  927.4  896.0  800  1,856  93.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
+0.0  3,593,747  56  64,174.1  64,144.5  63,105  65,728  478.5  void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
+0.0  3,433,971  4  858,492.8  858,901.0  855,877  860,292  2,168.9  void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
+0.0  3,191,215  2  1,595,607.5  1,595,607.5  1,560,359  1,630,856  49,848.9  void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
+0.0  2,871,002  1,512  1,898.8  1,760.0  1,312  2,912  445.3  void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
+0.0  2,643,071  581  4,549.2  4,671.0  1,984  36,256  1,427.2  triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
+0.0  2,581,742  2  1,290,871.0  1,290,871.0  1,290,663  1,291,079  294.2  at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
+0.0  2,581,389  2  1,290,694.5  1,290,694.5  1,290,406  1,290,983  408.0  void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
+0.0  2,421,998  112  21,625.0  21,552.0  9,408  34,465  12,020.4  void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
+0.0  1,835,794  581  3,159.7  3,200.0  1,632  39,200  1,543.9  triton_poi_fused_cat_1
+0.0  1,365,128  2  682,564.0  682,564.0  677,764  687,364  6,788.2  void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
+0.0  1,304,994  581  2,246.1  2,368.0  863  14,272  629.4  triton_poi_fused_view_3
+0.0  1,202,982  56  21,481.8  21,456.0  21,152  21,888  275.5  ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
+0.0  1,027,854  1,153  891.5  896.0  800  1,216  36.7  void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
+0.0  956,098  28  34,146.4  34,736.5  17,920  35,200  3,188.0  std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
+0.0  847,695  581  1,459.0  1,440.0  1,216  9,408  336.4  triton_poi_fused_cat_2
+0.0  611,794  673  909.1  896.0  864  1,025  28.1  void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<long>, std::array<char *, (uns…
+0.0  417,574  308  1,355.8  1,344.0  1,311  1,504  21.9  void vllm::merge_attn_states_kernel<__nv_bfloat16, (unsigned int)128>(T1 *, float *, const T1 *, co…
+0.0  295,335  168  1,757.9  1,760.0  1,535  2,080  119.1  void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
+0.0  155,841  1  155,841.0  155,841.0  155,841  155,841  0.0  void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
+0.0  78,880  1  78,880.0  78,880.0  78,880  78,880  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
+0.0  63,740  58  1,099.0  896.0  864  11,360  1,372.6  void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
+0.0  43,936  1  43,936.0  43,936.0  43,936  43,936  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
+0.0  36,570  28  1,306.1  1,312.0  1,280  1,376  19.5  void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
+0.0  26,816  1  26,816.0  26,816.0  26,816  26,816  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
+0.0  19,520  1  19,520.0  19,520.0  19,520  19,520  0.0  void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
+0.0  11,936  11  1,085.1  864.0  864  1,568  286.4  void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
+0.0  10,752  2  5,376.0  5,376.0  5,120  5,632  362.0  void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
+0.0  9,152  2  4,576.0  4,576.0  4,480  4,672  135.8  void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
+0.0  3,616  2  1,808.0  1,808.0  1,600  2,016  294.2  void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
+0.0  3,424  2  1,712.0  1,712.0  1,664  1,760  67.9  void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
+0.0  3,136  2  1,568.0  1,568.0  1,504  1,632  90.5  void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
+0.0  3,104  2  1,552.0  1,552.0  1,344  1,760  294.2  void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
+0.0  2,975  2  1,487.5  1,487.5  992  1,983  700.7  void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
+0.0  2,912  2  1,456.0  1,456.0  1,344  1,568  158.4  void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
+0.0  2,336  1  2,336.0  2,336.0  2,336  2,336  0.0  void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
+0.0  1,184  1  1,184.0  1,184.0  1,184  1,184  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
+0.0  1,024  1  1,024.0  1,024.0  1,024  1,024  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
+0.0  1,024  1  1,024.0  1,024.0  1,024  1,024  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
+0.0  896  1  896.0  896.0  896  896  0.0  void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
+
+[7/8] Executing 'cuda_gpu_mem_time_sum' stats report
+
+Time (%)  Total Time (ns)  Count  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Operation
+--------  ---------------  ------  --------  --------  --------  ----------  -----------  ------------------------------
+93.2  540,571,743  41,277  13,096.2  352.0  287  97,068,545  513,408.1  [CUDA memcpy Host-to-Device]
+3.2  18,710,334  14,564  1,284.7  896.0  864  1,362,855  22,521.7  [CUDA memcpy Device-to-Device]
+2.5  14,536,294  21,760  668.0  768.0  287  7,744  311.5  [CUDA memset]
+1.1  6,503,130  5,752  1,130.6  1,120.0  863  1,760  95.6  [CUDA memcpy Device-to-Host]
+
+[8/8] Executing 'cuda_gpu_mem_size_sum' stats report
+
+Total (MB)  Count  Avg (MB)  Med (MB)  Min (MB)  Max (MB)  StdDev (MB)  Operation
+----------  ------  --------  --------  --------  --------  -----------  ------------------------------
+4,190.741  41,277  0.102  0.000  0.000  466.747  2.619  [CUDA memcpy Host-to-Device]
+2,534.048  14,564  0.174  0.003  0.003  622.330  10.312  [CUDA memcpy Device-to-Device]
+14.589  21,760  0.001  0.001  0.000  0.006  0.000  [CUDA memset]
+4.192  5,752  0.001  0.000  0.000  0.004  0.001  [CUDA memcpy Device-to-Host]
+
+Generated:
+    /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding.nsys-rep
+    /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding.sqlite
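(A quick cross-check of the numbers above, not part of the uploaded files: the script computes generation throughput as total generated tokens over wall-clock time, and decode throughput as the reciprocal of the mean time-per-output-token taken from the vllm:time_per_output_token_seconds histogram delta. A minimal Python check against the bs=1 row:)

# Sanity check against the bs=1 row of the log above.
total_output_tokens = 512          # "Total generated tokens"
duration = 3.3194                  # "Execution time" in seconds
print(f"{total_output_tokens / duration:.2f} tok/s")   # 154.24, as reported

# "Decode throughput" is 1 / avg_TPOT, so 155.07 tok/s corresponds to an
# average time-per-output-token of roughly 6.45 ms.
print(f"{1.0 / 155.07 * 1e3:.2f} ms/token")            # ~6.45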
traverse_bs_util_sim_decoding.nsys-rep ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9cb1e2cf07fcb0e9c21d44c66cdf7762bc995625a1a55e835b8cffc5a6104b8
+size 109416019
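(This is a Git LFS pointer file: the repository stores only the object's SHA-256 and byte size, and `git lfs pull` fetches the actual ~109 MB Nsight Systems report.)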
traverse_bs_util_sim_decoding.py ADDED
@@ -0,0 +1,274 @@
+import os
+import time
+import statistics
+from typing import List, Tuple, Dict
+
+import torch
+import torch.cuda.nvtx as nvtx
+
+from vllm import LLM, SamplingParams
+from transformers import AutoTokenizer
+
+# ========= Force vLLM V1 =========
+os.environ.setdefault("VLLM_USE_V1", "1")
+os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")
+
+# Optional: enable V1 metrics logging
+os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")
+
+# ========= Try to import the V1 metrics types (compatible across versions) =========
+try:
+    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
+except Exception:
+    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy
+
+# ========= Configuration =========
+MODEL_NAME = "Qwen/Qwen2-1.5B"
+DTYPE = "bfloat16"
+TP = 1
+GPU_MEM_UTIL = 0.90
+TRUST_REMOTE_CODE = True
+
+# Scenarios: prefill = input tokens, decode = output tokens
+SCENARIOS = [
+    # {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
+    {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
+    # {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
+]
+
+BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
+
+SEED = 1234
+TEMPERATURE = 0.0
+TOP_P = 1.0
+WARMUP_PER_BS = 1  # one warmup run per batch size
+
+# ========= Build a prompt with an exact token count =========
+def build_exact_token_prompt(tokenizer, target_len: int) -> str:
+    if target_len <= 1:
+        # Minimal prompt: use one simple token (avoid an empty string, which yields 0 tokens)
+        ids = tokenizer("A", add_special_tokens=False)["input_ids"]
+        if len(ids) >= 1:
+            return tokenizer.decode(ids[:1], skip_special_tokens=True, clean_up_tokenization_spaces=False)
+
+    base_text = (
+        "You are a helpful assistant. "
+        "Please analyze the following input and respond succinctly. "
+    )
+    chunk = " ".join(["data"] * 100) + ". "
+    text = base_text + chunk * 200  # sufficiently long text
+
+    # Binary-search the character prefix whose tokenization is exactly target_len tokens
+    lo, hi = 0, len(text)
+    target_ids = None
+    while lo <= hi:
+        mid = (lo + hi) // 2
+        ids = tokenizer(text[:mid], add_special_tokens=False)["input_ids"]
+        if len(ids) == target_len:
+            target_ids = ids
+            break
+        if len(ids) < target_len:
+            lo = mid + 1
+        else:
+            hi = mid - 1
+
+    if target_ids is None:
+        ids = tokenizer(text[:lo], add_special_tokens=False)["input_ids"]
+        if len(ids) > target_len:
+            target_ids = ids[:target_len]
+        else:
+            filler = " data"
+            while len(ids) < target_len:
+                ids = tokenizer(tokenizer.decode(ids) + filler, add_special_tokens=False)["input_ids"]
+            target_ids = ids[:target_len]
+
+    prompt = tokenizer.decode(target_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+    # Assert the exact length
+    assert len(tokenizer(prompt, add_special_tokens=False)["input_ids"]) == target_len
+    return prompt
+
+# ========= V1 metrics extraction helpers =========
+TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
+TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency
+
+def _iter_children_of_vector(vec_obj):
+    for attr in ("children", "metrics", "series", "values", "samples", "items"):
+        if hasattr(vec_obj, attr):
+            val = getattr(vec_obj, attr)
+            if isinstance(val, dict):
+                for v in val.values():
+                    yield v
+            else:
+                try:
+                    for v in val:
+                        yield v
+                except TypeError:
+                    pass
+
+def _collect_hist_sum_count(metrics, metric_name: str):
+    total_sum = 0.0
+    total_count = 0.0
+    for m in metrics:
+        mname = getattr(m, "name", None)
+        if mname != metric_name:
+            continue
+        # Plain Histogram
+        if isinstance(m, Histogram) or m.__class__.__name__ == "Histogram":
+            total_sum += float(getattr(m, "sum", 0.0))
+            total_count += float(getattr(m, "count", 0.0))
+            continue
+        # Vector[Histogram]
+        if isinstance(m, Vector) or m.__class__.__name__ == "Vector":
+            for child in _iter_children_of_vector(m):
+                if isinstance(child, Histogram) or child.__class__.__name__ == "Histogram":
+                    total_sum += float(getattr(child, "sum", 0.0))
+                    total_count += float(getattr(child, "count", 0.0))
+    return total_sum, total_count
+
+def _metrics_snapshot(llm) -> Dict[str, float]:
+    try:
+        mets = llm.get_metrics()  # V1: returns a list of Metric objects (Histogram/Vector/etc.)
+    except Exception:
+        return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
+    ttft_sum, ttft_cnt = _collect_hist_sum_count(mets, TTFT_METRIC_NAME)
+    tpot_sum, tpot_cnt = _collect_hist_sum_count(mets, TPOT_METRIC_NAME)
+    return {"ttft_sum": ttft_sum, "ttft_cnt": ttft_cnt, "tpot_sum": tpot_sum, "tpot_cnt": tpot_cnt}
+
+def _metrics_delta(before: dict, after: dict):
+    return {
+        "ttft_sum": after["ttft_sum"] - before["ttft_sum"],
+        "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
+        "tpot_sum": after["tpot_sum"] - before["tpot_sum"],
+        "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
+    }
+
+# ========= generate wrapper (NVTX ranges are pushed at the call site) =========
+def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
+    return llm.generate(prompts, params)
+
+# ========= Stats formatting =========
+def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
+    xs = [v for v in x if (v == v)]  # filter out NaN
+    if not xs:
+        return (float("nan"), float("nan"), float("nan"))
+    return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1])  # p90
+
+def main():
+    print("--- vLLM V1 benchmark (with NVTX markers) ---")
+    print(f"Model: {MODEL_NAME}")
+    print(f"Batch sizes: {BATCH_SIZES}")
+    print(f"Scenarios: {[s['name'] for s in SCENARIOS]}")
+    print("-" * 60)
+
+    if not torch.cuda.is_available():
+        print("Error: a CUDA GPU is required.")
+        return
+
+    print("Loading tokenizer/model...")
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)
+
+    # Mark the model-loading phase with NVTX
+    nvtx.range_push("LLM_init")
+    llm = LLM(
+        model=MODEL_NAME,
+        tensor_parallel_size=TP,
+        dtype=DTYPE,
+        trust_remote_code=TRUST_REMOTE_CODE,
+        gpu_memory_utilization=GPU_MEM_UTIL,
+        max_num_seqs=1024,  # enough to cover this sweep
+        max_model_len=4096,
+        disable_log_stats=False,  # enable V1 metrics collection
+    )
+    nvtx.range_pop()
+    print("Model loaded.")
+
+    for sc in SCENARIOS:
+        name = sc["name"]
+        prompt_tokens = sc["prompt_tokens"]
+        max_new_tokens = sc["max_new_tokens"]
+
+        print(f"\n===== Scenario: {name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")
+
+        # Prepare a prompt with the exact token count
+        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)
+
+        # Sampling parameters (greedy)
+        # sampling_params = SamplingParams(
+        #     max_tokens=max_new_tokens,
+        #     temperature=TEMPERATURE,
+        #     top_p=TOP_P,
+        #     seed=SEED,
+        #     n=1,
+        # )
+
+        sampling_params = SamplingParams(
+            max_tokens=max_new_tokens,  # e.g. 512
+            # Key points:
+            ignore_eos=True,            # ignore EOS and keep generating
+            stop=None,                  # no stop strings
+            stop_token_ids=[],          # no stop tokens
+            # Optional: if your vLLM version supports it
+            min_tokens=max_new_tokens,  # generate at least N tokens (<= max_tokens)
+            temperature=0.0,
+            top_p=1.0,
+        )
+
+        # Record results for each bs (for later aggregation or external parsing)
+        for bs in BATCH_SIZES:
+            print(f"\n--- Batch size bs={bs} ---")
+
+            prompts = [prompt_text] * bs
+
+            # Warmup
+            # print("Warming up...")
+            # nvtx.range_push(f"WARMUP [{name}] bs={bs}")
+            # _ = decorated_generate(llm, [prompts[0]], sampling_params)
+            # torch.cuda.synchronize()
+            # nvtx.range_pop()
+
+            # Timed run plus V1 metrics
+            # nvtx.range_push(f"RUN [{name}] bs={bs}")
+            torch.cuda.synchronize()
+            snap_before = _metrics_snapshot(llm)
+            t0 = time.perf_counter()
+
+            nvtx.range_push(f"generate [{name}] bs={bs}")
+            outputs = decorated_generate(llm, prompts, sampling_params)
+            nvtx.range_pop()  # generate
+
+            torch.cuda.synchronize()
+            t1 = time.perf_counter()
+            snap_after = _metrics_snapshot(llm)
+            # nvtx.range_pop()  # RUN
+
+            duration = t1 - t0
+
+            # Token counts and throughput
+            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
+            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
+            throughput = total_output_tokens / duration if duration > 0 else float("inf")
+
+            # Derive V1 TTFT / decode throughput
+            delta = _metrics_delta(snap_before, snap_after)
+            if delta["ttft_cnt"] > 0:
+                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
+            else:
+                ttft = float("nan")
+
+            if delta["tpot_cnt"] > 0:
+                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds/token
+                decode_tps = 1.0 / avg_tpot
+            else:
+                decode_tps = float("nan")
+
+            print(f"Execution time: {duration:.4f} s")
+            print(f"Actual average input tokens: {avg_prompt_tokens:.2f} (target {prompt_tokens})")
+            print(f"Total generated tokens: {total_output_tokens}")
+            print(f"Throughput (generated tokens/s): {throughput:.2f}")
+            print(f"TTFT (V1 metrics): {ttft:.4f} s")
+            print(f"Decode throughput (V1 metrics): {decode_tps:.2f} tok/s")
+
+    print("\nDone. Tip: the NVTX ranges make it easy to locate each scenario/batch-size call in Nsight Systems.")
+
+if __name__ == "__main__":
+    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
+    main()
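(A minimal standalone check of the exact-length prompt builder above; a sketch that assumes the script is importable as a module named traverse_bs_util_sim_decoding, which also pulls in vllm at import time:)

# Hypothetical standalone check of the exact-token-count invariant.
from transformers import AutoTokenizer
from traverse_bs_util_sim_decoding import build_exact_token_prompt

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2-1.5B", use_fast=True, trust_remote_code=True)
prompt = build_exact_token_prompt(tok, 640)
assert len(tok(prompt, add_special_tokens=False)["input_ids"]) == 640

The accompanying .nsys-rep files were presumably produced by running this script under Nsight Systems (something along the lines of `nsys profile -o traverse_bs_util_sim_decoding python traverse_bs_util_sim_decoding.py`; the exact flags are not recorded here, though the log's "GPU 3: General Metrics" line shows GPU-metrics sampling was enabled).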
traverse_bs_util_sim_prefill.log ADDED
@@ -0,0 +1,382 @@
| 1 |
+
WARNING: CPU IP/backtrace sampling not supported, disabling.
|
| 2 |
+
Try the 'nsys status --environment' command to learn more.
|
| 3 |
+
|
| 4 |
+
WARNING: CPU context switch tracing not supported, disabling.
|
| 5 |
+
Try the 'nsys status --environment' command to learn more.
|
| 6 |
+
|
| 7 |
+
INFO 08-13 19:12:40 [__init__.py:235] Automatically detected platform cuda.
|
| 8 |
+
CUDA_VISIBLE_DEVICES = 3
|
| 9 |
+
--- vLLM V1 基准测试(含 NVTX 标记)---
|
| 10 |
+
模型: Qwen/Qwen2-1.5B
|
| 11 |
+
批量大小: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
|
| 12 |
+
场景: ['prefill640_decode1']
|
| 13 |
+
------------------------------------------------------------
|
| 14 |
+
加载分词器/模型中...
|
| 15 |
+
INFO 08-13 19:12:50 [config.py:1604] Using max model len 4096
|
| 16 |
+
INFO 08-13 19:12:50 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
|
| 17 |
+
INFO 08-13 19:12:55 [__init__.py:235] Automatically detected platform cuda.
|
| 18 |
+
INFO 08-13 19:12:57 [core.py:572] Waiting for init message from front-end.
|
| 19 |
+
INFO 08-13 19:12:57 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
|
| 20 |
+
INFO 08-13 19:12:59 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
|
| 21 |
+
WARNING 08-13 19:12:59 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 22 |
+
INFO 08-13 19:12:59 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
|
| 23 |
+
INFO 08-13 19:12:59 [gpu_model_runner.py:1875] Loading model from scratch...
|
| 24 |
+
INFO 08-13 19:12:59 [cuda.py:290] Using Flash Attention backend on V1 engine.
|
| 25 |
+
INFO 08-13 19:13:00 [weight_utils.py:296] Using model weights format ['*.safetensors']
|
| 26 |
+
INFO 08-13 19:13:00 [weight_utils.py:349] No model.safetensors.index.json found in remote.
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
INFO 08-13 19:13:01 [default_loader.py:262] Loading weights took 0.63 seconds
|
| 32 |
+
INFO 08-13 19:13:01 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 1.958554 seconds
|
| 33 |
+
INFO 08-13 19:13:07 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
|
| 34 |
+
INFO 08-13 19:13:07 [backends.py:541] Dynamo bytecode transform time: 5.46 s
|
| 35 |
+
INFO 08-13 19:13:11 [backends.py:161] Directly load the compiled graph(s) for dynamic shape from the cache, took 3.856 s
|
| 36 |
+
INFO 08-13 19:13:12 [monitor.py:34] torch.compile takes 5.46 s in total
|
| 37 |
+
INFO 08-13 19:13:13 [gpu_worker.py:255] Available KV cache memory: 12.81 GiB
|
| 38 |
+
INFO 08-13 19:13:13 [kv_cache_utils.py:833] GPU KV cache size: 479,536 tokens
|
| 39 |
+
INFO 08-13 19:13:13 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.07x
|
| 40 |
+
|
| 41 |
+
INFO 08-13 19:13:15 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
|
| 42 |
+
INFO 08-13 19:13:15 [core.py:193] init engine (profile, create kv cache, warmup model) took 14.10 seconds
|
| 43 |
+
Model loaded.
|
| 44 |
+
|
| 45 |
+
===== Scenario: prefill640_decode1 | prefill=640, decode=1 =====
|
| 46 |
+
|
| 47 |
+
--- Batch size bs=1 ---
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
Execution time: 0.0303 s
|
| 51 |
+
Actual average input tokens: 640.00 (target 640)
|
| 52 |
+
Total generated tokens: 1
|
| 53 |
+
Throughput (generated tokens/s): 33.00
|
| 54 |
+
TTFT (V1 metrics): 0.0268 s
|
| 55 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 56 |
+
|
| 57 |
+
--- Batch size bs=2 ---
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
Execution time: 0.0169 s
|
| 61 |
+
Actual average input tokens: 640.00 (target 640)
|
| 62 |
+
Total generated tokens: 2
|
| 63 |
+
Throughput (generated tokens/s): 118.29
|
| 64 |
+
TTFT (V1 metrics): 0.0123 s
|
| 65 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 66 |
+
|
| 67 |
+
--- Batch size bs=4 ---
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
Execution time: 0.0192 s
|
| 71 |
+
Actual average input tokens: 640.00 (target 640)
|
| 72 |
+
Total generated tokens: 4
|
| 73 |
+
Throughput (generated tokens/s): 207.80
|
| 74 |
+
TTFT (V1 metrics): 0.0145 s
|
| 75 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 76 |
+
|
| 77 |
+
--- Batch size bs=8 ---
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
Execution time: 0.0265 s
|
| 81 |
+
Actual average input tokens: 640.00 (target 640)
|
| 82 |
+
Total generated tokens: 8
|
| 83 |
+
Throughput (generated tokens/s): 302.46
|
| 84 |
+
TTFT (V1 metrics): 0.0151 s
|
| 85 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 86 |
+
|
| 87 |
+
--- Batch size bs=16 ---
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
Execution time: 0.0332 s
|
| 91 |
+
Actual average input tokens: 640.00 (target 640)
|
| 92 |
+
Total generated tokens: 16
|
| 93 |
+
Throughput (generated tokens/s): 481.67
|
| 94 |
+
TTFT (V1 metrics): 0.0162 s
|
| 95 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 96 |
+
|
| 97 |
+
--- Batch size bs=32 ---
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
Execution time: 0.0553 s
|
| 101 |
+
Actual average input tokens: 640.00 (target 640)
|
| 102 |
+
Total generated tokens: 32
|
| 103 |
+
Throughput (generated tokens/s): 578.82
|
| 104 |
+
TTFT (V1 metrics): 0.0250 s
|
| 105 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 106 |
+
|
| 107 |
+
--- Batch size bs=64 ---
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
Execution time: 0.0975 s
|
| 111 |
+
Actual average input tokens: 640.00 (target 640)
|
| 112 |
+
Total generated tokens: 64
|
| 113 |
+
Throughput (generated tokens/s): 656.64
|
| 114 |
+
TTFT (V1 metrics): 0.0459 s
|
| 115 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 116 |
+
|
| 117 |
+
--- Batch size bs=128 ---
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
Execution time: 0.1830 s
|
| 121 |
+
Actual average input tokens: 640.00 (target 640)
|
| 122 |
+
Total generated tokens: 128
|
| 123 |
+
Throughput (generated tokens/s): 699.40
|
| 124 |
+
TTFT (V1 metrics): 0.0911 s
|
| 125 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 126 |
+
|
| 127 |
+
--- Batch size bs=256 ---
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
Execution time: 0.3573 s
|
| 131 |
+
Actual average input tokens: 640.00 (target 640)
|
| 132 |
+
Total generated tokens: 256
|
| 133 |
+
Throughput (generated tokens/s): 716.54
|
| 134 |
+
TTFT (V1 metrics): 0.1787 s
|
| 135 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 136 |
+
|
| 137 |
+
--- Batch size bs=512 ---
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
Execution time: 0.9187 s
|
| 141 |
+
Actual average input tokens: 640.00 (target 640)
|
| 142 |
+
Total generated tokens: 512
|
| 143 |
+
Throughput (generated tokens/s): 557.31
|
| 144 |
+
TTFT (V1 metrics): 0.3830 s
|
| 145 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 146 |
+
|
| 147 |
+
--- Batch size bs=1024 ---
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
[rank0]:[W813 19:13:21.628248555 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
|
| 151 |
+
Execution time: 1.4298 s
|
| 152 |
+
Actual average input tokens: 640.00 (target 640)
|
| 153 |
+
Total generated tokens: 1024
|
| 154 |
+
Throughput (generated tokens/s): 716.16
|
| 155 |
+
TTFT (V1 metrics): 0.7169 s
|
| 156 |
+
Decode throughput (V1 metrics): nan tok/s
|
| 157 |
+
|
| 158 |
+
Done. Tip: in Nsight Systems, the NVTX ranges make it easy to locate the call for each scenario/batch size.
|
| 159 |
+
GPU 3: General Metrics for NVIDIA AD10x (any frequency)
|
| 160 |
+
Generating '/tmp/nsys-report-be94.qdstrm'
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
[3/8] Executing 'nvtx_sum' stats report
|
| 164 |
+
|
| 165 |
+
Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Style Range
|
| 166 |
+
-------- --------------- --------- ---------------- ---------------- -------------- -------------- ----------- ------- --------------------------------------
|
| 167 |
+
91.5 34,255,644,442 1 34,255,644,442.0 34,255,644,442.0 34,255,644,442 34,255,644,442 0.0 PushPop :LLM_init
|
| 168 |
+
3.8 1,429,765,717 1 1,429,765,717.0 1,429,765,717.0 1,429,765,717 1,429,765,717 0.0 PushPop :generate [prefill640_decode1] bs=1024
|
| 169 |
+
2.5 918,598,349 1 918,598,349.0 918,598,349.0 918,598,349 918,598,349 0.0 PushPop :generate [prefill640_decode1] bs=512
|
| 170 |
+
1.0 357,203,794 1 357,203,794.0 357,203,794.0 357,203,794 357,203,794 0.0 PushPop :generate [prefill640_decode1] bs=256
|
| 171 |
+
0.5 182,944,803 1 182,944,803.0 182,944,803.0 182,944,803 182,944,803 0.0 PushPop :generate [prefill640_decode1] bs=128
|
| 172 |
+
0.3 97,400,007 1 97,400,007.0 97,400,007.0 97,400,007 97,400,007 0.0 PushPop :generate [prefill640_decode1] bs=64
|
| 173 |
+
0.1 55,233,548 1 55,233,548.0 55,233,548.0 55,233,548 55,233,548 0.0 PushPop :generate [prefill640_decode1] bs=32
|
| 174 |
+
0.1 33,169,330 1 33,169,330.0 33,169,330.0 33,169,330 33,169,330 0.0 PushPop :generate [prefill640_decode1] bs=16
|
| 175 |
+
0.1 30,210,666 1 30,210,666.0 30,210,666.0 30,210,666 30,210,666 0.0 PushPop :generate [prefill640_decode1] bs=1
|
| 176 |
+
0.1 26,332,699 1 26,332,699.0 26,332,699.0 26,332,699 26,332,699 0.0 PushPop :generate [prefill640_decode1] bs=8
|
| 177 |
+
0.1 19,203,429 1 19,203,429.0 19,203,429.0 19,203,429 19,203,429 0.0 PushPop :generate [prefill640_decode1] bs=4
|
| 178 |
+
0.0 16,846,845 1 16,846,845.0 16,846,845.0 16,846,845 16,846,845 0.0 PushPop :generate [prefill640_decode1] bs=2
|
| 179 |
+
0.0 105,272 2 52,636.0 52,636.0 49,581 55,691 4,320.4 PushPop CCCL:cub::DeviceSegmentedRadixSort
|
| 180 |
+
|
| 181 |
+
[4/8] Executing 'osrt_sum' stats report
|
| 182 |
+
|
| 183 |
+
Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
|
| 184 |
+
-------- --------------- --------- ---------------- ------------ --------- -------------- ---------------- ----------------------
|
| 185 |
+
31.6 297,381,519,943 14,617 20,344,908.0 34,539.0 1,030 22,258,440,024 414,761,021.5 pthread_cond_timedwait
|
| 186 |
+
26.2 246,799,144,239 18,218 13,546,994.4 10,061,235.5 1,012 25,048,066,633 321,112,693.8 epoll_wait
|
| 187 |
+
23.6 222,303,168,174 22 10,104,689,462.5 4,996,401.5 18,889 22,259,597,278 11,328,087,649.4 pthread_cond_wait
|
| 188 |
+
8.0 75,285,383,295 9,750 7,721,577.8 1,471.0 1,000 10,010,044,231 160,821,505.6 poll
|
| 189 |
+
6.2 58,364,790,020 28 2,084,456,786.4 130,001.0 11,516 10,000,121,902 3,904,360,688.6 sem_timedwait
|
| 190 |
+
3.7 34,631,283,784 31,001 1,117,102.2 2,501.0 1,000 20,354,884,016 126,413,489.1 read
|
| 191 |
+
0.5 4,262,251,691 375 11,366,004.5 6,769,937.0 25,915 606,781,491 45,247,323.7 sem_wait
|
| 192 |
+
0.0 435,782,688 193,508 2,252.0 1,290.0 1,001 78,395,275 178,359.2 munmap
|
| 193 |
+
0.0 295,324,731 8,442 34,982.8 10,400.0 1,002 28,343,070 412,455.7 ioctl
|
| 194 |
+
0.0 233,681,765 370 631,572.3 2,278.0 1,038 20,134,490 3,304,852.9 fopen
|
| 195 |
+
0.0 121,569,511 24 5,065,396.3 5,064,025.0 5,057,109 5,077,694 5,077.1 nanosleep
|
| 196 |
+
0.0 83,589,086 30,614 2,730.4 2,405.0 1,001 93,613 1,688.8 open64
|
| 197 |
+
0.0 82,741,435 44 1,880,487.2 3,387.5 1,008 76,213,344 11,471,905.8 waitpid
|
| 198 |
+
0.0 79,313,400 95 834,877.9 3,817.0 1,120 21,595,875 3,991,908.6 open
|
| 199 |
+
0.0 67,843,028 37 1,833,595.4 560,943.0 7,912 10,421,345 3,417,478.2 pthread_join
|
| 200 |
+
0.0 63,655,491 10 6,365,549.1 20,920.5 9,183 63,456,894 20,059,858.9 connect
|
| 201 |
+
0.0 49,611,881 12,926 3,838.1 2,497.5 1,000 1,578,326 21,496.3 mmap64
|
| 202 |
+
0.0 31,354,898 11,053 2,836.8 2,317.0 1,000 98,099 4,837.4 pthread_cond_signal
|
| 203 |
+
0.0 21,849,578 147 148,636.6 68,887.0 54,551 11,982,867 982,770.0 sleep
|
| 204 |
+
0.0 18,414,594 2,437 7,556.3 4,453.0 1,032 3,216,724 67,282.5 recv
|
| 205 |
+
0.0 18,204,857 1,636 11,127.7 8,142.5 1,000 828,819 26,164.8 pthread_mutex_lock
|
| 206 |
+
0.0 14,656,134 2,431 6,028.8 4,943.0 1,611 98,063 6,075.3 send
|
| 207 |
+
0.0 13,750,593 4,854 2,832.8 2,203.0 1,000 90,410 5,299.0 write
|
| 208 |
+
0.0 6,404,270 131 48,887.6 44,033.0 19,090 137,564 20,637.3 pthread_create
|
| 209 |
+
0.0 5,992,803 873 6,864.6 3,804.0 1,003 84,435 8,636.1 fgets
|
| 210 |
+
0.0 4,101,819 2,724 1,505.8 1,399.0 1,000 14,872 523.6 epoll_ctl
|
| 211 |
+
0.0 1,737,636 344 5,051.3 4,878.5 1,791 40,256 2,280.7 fopen64
|
| 212 |
+
0.0 1,580,944 62 25,499.1 3,739.0 1,061 361,517 63,617.6 futex
|
| 213 |
+
0.0 1,578,916 19 83,100.8 26,844.0 12,846 229,739 84,676.6 pthread_rwlock_wrlock
|
| 214 |
+
0.0 1,219,162 1,029 1,184.8 1,021.0 1,000 9,329 566.5 fclose
|
| 215 |
+
0.0 979,772 196 4,998.8 3,578.5 1,240 72,994 7,158.8 mmap
|
| 216 |
+
0.0 880,707 1 880,707.0 880,707.0 880,707 880,707 0.0 fork
|
| 217 |
+
0.0 817,580 10 81,758.0 50,097.5 12,485 189,119 64,533.4 pthread_rwlock_rdlock
|
| 218 |
+
0.0 344,919 65 5,306.4 4,286.0 1,901 13,820 2,831.0 pipe2
|
| 219 |
+
0.0 227,255 41 5,542.8 4,926.0 1,403 15,831 3,463.5 socket
|
| 220 |
+
0.0 176,218 19 9,274.6 2,447.0 1,013 63,892 16,667.1 bind
|
| 221 |
+
0.0 143,818 31 4,639.3 3,145.0 1,659 15,408 3,429.6 pthread_cond_broadcast
|
| 222 |
+
0.0 76,818 7 10,974.0 10,593.0 4,207 26,562 7,634.1 fread
|
| 223 |
+
0.0 58,634 5 11,726.8 9,913.0 6,172 20,449 5,556.1 accept4
|
| 224 |
+
0.0 44,580 27 1,651.1 1,250.0 1,001 4,508 919.9 fcntl
|
| 225 |
+
0.0 38,617 15 2,574.5 2,007.0 1,212 6,517 1,396.6 stat
|
| 226 |
+
0.0 38,220 22 1,737.3 1,771.5 1,040 2,400 357.0 sigaction
|
| 227 |
+
0.0 34,286 18 1,904.8 2,174.5 1,009 3,066 665.3 dup2
|
| 228 |
+
0.0 25,737 8 3,217.1 3,143.0 1,199 5,660 2,028.2 fflush
|
| 229 |
+
0.0 20,805 5 4,161.0 3,204.0 1,689 7,529 2,419.0 fwrite
|
| 230 |
+
0.0 19,991 4 4,997.8 4,954.0 4,665 5,418 314.8 lstat
|
| 231 |
+
0.0 17,854 8 2,231.8 1,745.0 1,320 3,550 1,017.9 pread
|
| 232 |
+
0.0 15,459 11 1,405.4 1,285.0 1,101 2,235 367.0 listen
|
| 233 |
+
0.0 14,610 4 3,652.5 3,564.5 2,768 4,713 801.6 flock
|
| 234 |
+
0.0 13,040 3 4,346.7 4,329.0 4,270 4,441 86.9 fputs_unlocked
|
| 235 |
+
0.0 11,981 5 2,396.2 2,771.0 1,202 2,927 711.6 mprotect
|
| 236 |
+
0.0 9,480 4 2,370.0 2,249.5 1,906 3,075 510.7 flockfile
|
| 237 |
+
0.0 6,404 1 6,404.0 6,404.0 6,404 6,404 0.0 kill
|
| 238 |
+
0.0 6,248 4 1,562.0 1,573.0 1,113 1,989 492.3 fstat
|
| 239 |
+
0.0 4,499 2 2,249.5 2,249.5 1,682 2,817 802.6 openat64
|
| 240 |
+
0.0 2,859 1 2,859.0 2,859.0 2,859 2,859 0.0 fputs
|
| 241 |
+
0.0 1,224 1 1,224.0 1,224.0 1,224 1,224 0.0 pthread_mutex_trylock
|
| 242 |
+
|
| 243 |
+
[5/8] Executing 'cuda_api_sum' stats report
|
| 244 |
+
|
| 245 |
+
Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
|
| 246 |
+
-------- --------------- --------- ----------- ----------- -------- ----------- ----------- ------------------------------------------
|
| 247 |
+
34.3 812,140,032 3,083 263,425.2 6,428.0 2,928 98,188,542 1,916,618.2 cudaMemcpyAsync
|
| 248 |
+
21.8 516,021,139 36,947 13,966.5 6,360.0 775 58,987,857 445,775.0 cudaLaunchKernel
|
| 249 |
+
9.2 218,253,465 1,943 112,328.1 73,036.0 42,162 1,493,412 185,303.5 cudaGraphInstantiateWithFlags_v11040
|
| 250 |
+
8.5 199,893,902 1,066 187,517.7 3,358.5 1,531 143,716,638 4,421,443.3 cudaStreamSynchronize
|
| 251 |
+
7.6 180,431,780 2,135 84,511.4 31,189.0 5,473 114,045,672 2,467,576.1 cudaDeviceSynchronize
|
| 252 |
+
5.7 135,517,434 9,541 14,203.7 12,994.0 7,873 57,365 3,806.6 cudaGraphLaunch_v10000
|
| 253 |
+
4.4 103,077,576 32,794 3,143.2 3,734.0 625 42,966 2,040.8 cuLaunchKernel
|
| 254 |
+
2.3 53,907,221 222 242,825.3 116,370.0 60,442 2,394,882 358,819.3 cudaFree
|
| 255 |
+
1.7 40,503,891 348 116,390.5 108,995.5 4,264 1,127,684 59,276.4 cudaMalloc
|
| 256 |
+
1.1 25,697,655 10 2,569,765.5 2,671,025.0 56,954 4,429,810 1,454,311.0 cuLibraryLoadData
|
| 257 |
+
0.7 16,202,477 5,650 2,867.7 1,890.0 203 217,650 3,509.8 cudaMemsetAsync
|
| 258 |
+
0.4 10,179,971 9,641 1,055.9 400.0 260 4,173,119 44,067.9 cuKernelGetFunction
|
| 259 |
+
0.4 10,058,945 169 59,520.4 63,100.0 27,735 269,824 28,306.4 cuModuleLoadData
|
| 260 |
+
0.4 10,043,958 11,842 848.2 789.0 324 10,495 244.9 cudaStreamIsCapturing_v10000
|
| 261 |
+
0.4 9,094,302 18,895 481.3 447.0 292 6,998 140.7 cudaStreamGetCaptureInfo_v2_v11030
|
| 262 |
+
0.3 8,147,148 1,943 4,193.1 4,074.0 3,220 20,274 777.0 cudaStreamBeginCapture_v10000
|
| 263 |
+
0.3 7,560,536 1,943 3,891.2 3,854.0 2,420 9,751 520.1 cudaGraphDestroy_v10000
|
| 264 |
+
0.1 2,956,110 128 23,094.6 2,043.5 1,256 1,017,699 123,547.8 cudaStreamCreateWithPriority
|
| 265 |
+
0.1 2,497,642 1,943 1,285.5 1,271.0 1,001 1,848 114.9 cudaStreamEndCapture_v10000
|
| 266 |
+
0.1 1,589,818 1,943 818.2 750.0 631 2,645 247.1 cudaGraphGetNodes_v10000
|
| 267 |
+
0.1 1,331,824 14 95,130.3 5,133.0 3,547 1,267,757 337,507.7 cudaHostAlloc
|
| 268 |
+
0.0 224,235 8 28,029.4 27,965.5 8,873 65,303 19,334.0 cudaMemGetInfo
|
| 269 |
+
0.0 135,950 810 167.8 133.0 74 1,928 122.6 cuGetProcAddress_v2
|
| 270 |
+
0.0 16,525 19 869.7 347.0 273 4,275 1,102.2 cudaEventCreateWithFlags
|
| 271 |
+
0.0 13,697 15 913.1 798.0 354 2,245 499.4 cuLibraryGetKernel
|
| 272 |
+
0.0 8,006 14 571.9 561.0 298 931 170.8 cudaThreadExchangeStreamCaptureMode_v10010
|
| 273 |
+
0.0 5,200 1 5,200.0 5,200.0 5,200 5,200 0.0 cudaEventRecord
|
| 274 |
+
0.0 4,363 3 1,454.3 1,461.0 1,138 1,764 313.1 cuInit
|
| 275 |
+
0.0 3,658 1 3,658.0 3,658.0 3,658 3,658 0.0 cudaStreamWaitEvent
|
| 276 |
+
0.0 3,288 4 822.0 586.0 82 2,034 922.5 cuModuleGetLoadingMode
|
| 277 |
+
0.0 1,938 1 1,938.0 1,938.0 1,938 1,938 0.0 cudaEventDestroy
|
| 278 |
+
0.0 1,284 2 642.0 642.0 277 1,007 516.2 cudaGetDriverEntryPoint_v11030
|
| 279 |
+
|
| 280 |
+
[6/8] Executing 'cuda_gpu_kern_sum' stats report
|
| 281 |
+
|
| 282 |
+
Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
|
| 283 |
+
-------- --------------- --------- ----------- ----------- --------- --------- ----------- ----------------------------------------------------------------------------------------------------
|
| 284 |
+
17.4 197,947,062 9,240 21,422.8 22,752.0 6,752 24,897 4,470.0 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
|
| 285 |
+
16.7 190,016,589 3,836 49,535.1 22,048.0 7,648 131,073 39,111.5 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
|
| 286 |
+
15.4 175,442,595 765 229,336.7 61,089.0 10,625 521,635 235,358.8 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
|
| 287 |
+
13.9 157,634,573 924 170,600.2 162,625.5 40,224 1,415,047 225,630.4 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
|
| 288 |
+
6.9 78,474,832 28 2,802,672.6 2,803,582.0 2,787,886 2,808,622 5,629.9 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
|
| 289 |
+
3.4 39,224,462 8 4,903,057.8 4,858,922.0 4,807,161 5,087,547 109,376.4 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
|
| 290 |
+
2.9 32,817,057 140 234,407.6 231,842.0 121,760 377,154 83,557.7 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
|
| 291 |
+
2.0 22,647,227 784 28,886.8 12,512.0 11,584 62,368 20,630.1 ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
|
| 292 |
+
1.9 21,994,025 1,960 11,221.4 4,448.0 1,055 461,986 54,151.8 triton_poi_fused_mul_silu_1
|
| 293 |
+
1.8 20,374,153 4 5,093,538.3 5,082,874.0 4,907,865 5,300,540 193,781.3 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
|
| 294 |
+
1.7 19,801,839 924 21,430.6 21,408.0 21,024 21,856 89.0 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
|
| 295 |
+
1.3 14,291,848 28 510,423.1 511,794.0 469,890 513,858 7,978.2 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
|
| 296 |
+
1.2 13,732,511 9,240 1,486.2 1,472.0 1,120 2,176 99.3 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
|
| 297 |
+
1.2 13,254,405 142 93,340.9 51,632.5 50,176 3,030,607 350,548.4 ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
|
| 298 |
+
0.9 10,247,270 1,960 5,228.2 3,488.0 1,536 110,881 12,591.6 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
|
| 299 |
+
0.9 9,752,980 4 2,438,245.0 2,430,621.0 2,388,877 2,502,861 56,361.3 void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
|
| 300 |
+
0.8 9,147,313 28 326,689.8 326,721.5 324,257 330,242 1,117.1 ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
|
| 301 |
+
0.7 8,448,874 97 87,101.8 8,865.0 7,040 498,466 160,499.7 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
|
| 302 |
+
0.7 8,400,518 224 37,502.3 37,472.5 36,384 39,168 466.0 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
|
| 303 |
+
0.7 7,806,345 2 3,903,172.5 3,903,172.5 3,707,123 4,099,222 277,255.9 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
|
| 304 |
+
0.6 7,228,323 336 21,512.9 21,440.0 21,088 22,624 351.4 ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
|
| 305 |
+
0.6 6,387,562 1,960 3,259.0 2,048.0 1,535 79,712 9,056.1 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
|
| 306 |
+
0.5 6,106,951 476 12,829.7 12,800.0 11,712 14,464 734.2 ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
|
| 307 |
+
0.5 5,899,904 4 1,474,976.0 1,474,552.0 1,473,800 1,477,000 1,402.5 void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
|
| 308 |
+
0.4 4,854,373 334 14,534.1 5,823.0 5,376 714,116 76,872.2 void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
|
| 309 |
+
0.4 4,379,675 140 31,283.4 27,169.0 26,176 49,952 8,312.6 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
|
| 310 |
+
0.4 4,089,246 1,890 2,163.6 1,824.0 1,344 22,400 2,259.9 triton_poi_fused_cat_3
|
| 311 |
+
0.4 3,995,829 2 1,997,914.5 1,997,914.5 1,996,875 1,998,954 1,470.1 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
|
| 312 |
+
0.3 3,591,759 56 64,138.6 64,128.5 63,040 65,344 470.6 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
|
| 313 |
+
0.3 3,536,964 332 10,653.5 4,672.0 2,143 1,004,742 77,469.7 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
|
| 314 |
+
0.3 3,467,234 4,221 821.4 800.0 767 1,280 73.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
|
| 315 |
+
0.3 3,436,144 4 859,036.0 858,500.0 856,164 862,980 3,278.8 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
|
| 316 |
+
0.3 3,187,952 2 1,593,976.0 1,593,976.0 1,571,272 1,616,680 32,108.3 void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
|
| 317 |
+
0.3 2,855,430 1,512 1,888.5 1,697.0 1,312 2,880 453.6 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
|
| 318 |
+
0.2 2,699,167 1,890 1,428.1 1,152.0 863 16,609 1,775.2 triton_poi_fused_view_5
|
| 319 |
+
0.2 2,650,106 1,890 1,402.2 1,344.0 1,215 6,336 546.8 triton_poi_fused_cat_4
|
| 320 |
+
0.2 2,583,886 2 1,291,943.0 1,291,943.0 1,291,463 1,292,423 678.8 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
|
| 321 |
+
0.2 2,580,524 2 1,290,262.0 1,290,262.0 1,289,350 1,291,174 1,289.8 at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
|
| 322 |
+
0.2 2,426,348 112 21,663.8 21,568.5 9,504 34,784 12,052.3 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
|
| 323 |
+
0.1 1,501,800 466 3,222.7 3,040.0 2,880 7,456 428.1 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
|
| 324 |
+
0.1 1,396,888 924 1,511.8 1,504.0 1,440 1,600 30.6 void vllm::merge_attn_states_kernel<__nv_bfloat16, (unsigned int)128>(T1 *, float *, const T1 *, co…
|
| 325 |
+
0.1 1,367,879 2 683,939.5 683,939.5 676,036 691,843 11,177.2 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
|
| 326 |
+
0.1 1,147,814 28 40,993.4 40,864.0 40,352 42,560 490.6 ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
|
| 327 |
+
0.1 957,606 28 34,200.2 34,768.5 17,600 35,200 3,259.5 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
|
| 328 |
+
0.1 657,092 28 23,467.6 23,376.0 23,072 24,353 360.0 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
|
| 329 |
+
0.0 506,203 330 1,533.9 1,568.0 1,184 1,760 118.3 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
|
| 330 |
+
0.0 417,887 332 1,258.7 1,248.0 1,120 1,695 43.2 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
|
| 331 |
+
0.0 300,134 330 909.5 896.0 895 992 22.4 void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
|
| 332 |
+
0.0 295,461 168 1,758.7 1,760.0 1,536 1,953 115.4 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
|
| 333 |
+
0.0 290,763 330 881.1 865.0 863 960 23.0 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
|
| 334 |
+
0.0 257,951 70 3,685.0 3,040.0 2,048 35,776 3,957.7 triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
|
| 335 |
+
0.0 230,204 258 892.3 896.0 863 960 21.7 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
|
| 336 |
+
0.0 191,518 70 2,736.0 2,112.0 1,631 40,096 4,544.2 triton_poi_fused_cat_1
|
| 337 |
+
0.0 157,536 1 157,536.0 157,536.0 157,536 157,536 0.0 void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
|
| 338 |
+
0.0 100,829 70 1,440.4 1,344.0 1,216 8,768 891.7 triton_poi_fused_cat_2
|
| 339 |
+
0.0 99,231 70 1,417.6 1,184.0 863 13,793 1,532.3 triton_poi_fused_view_3
|
| 340 |
+
0.0 80,032 1 80,032.0 80,032.0 80,032 80,032 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
|
| 341 |
+
0.0 68,035 76 895.2 865.0 863 1,824 110.8 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
|
| 342 |
+
0.0 64,195 58 1,106.8 896.0 864 11,360 1,372.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
|
| 343 |
+
0.0 43,840 1 43,840.0 43,840.0 43,840 43,840 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
|
| 344 |
+
0.0 36,803 28 1,314.4 1,312.0 1,280 1,344 12.1 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
|
| 345 |
+
0.0 26,624 1 26,624.0 26,624.0 26,624 26,624 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
|
| 346 |
+
0.0 19,361 1 19,361.0 19,361.0 19,361 19,361 0.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
|
| 347 |
+
0.0 12,031 11 1,093.7 896.0 863 1,568 276.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
|
| 348 |
+
0.0 10,815 2 5,407.5 5,407.5 5,151 5,664 362.7 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
|
| 349 |
+
0.0 9,184 2 4,592.0 4,592.0 4,512 4,672 113.1 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
|
| 350 |
+
0.0 3,616 2 1,808.0 1,808.0 1,600 2,016 294.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
|
| 351 |
+
0.0 3,521 2 1,760.5 1,760.5 1,696 1,825 91.2 void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
|
| 352 |
+
0.0 3,135 2 1,567.5 1,567.5 1,503 1,632 91.2 void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
|
| 353 |
+
0.0 2,977 2 1,488.5 1,488.5 1,344 1,633 204.4 void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
|
| 354 |
+
0.0 2,976 2 1,488.0 1,488.0 992 1,984 701.4 void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
|
| 355 |
+
0.0 2,912 2 1,456.0 1,456.0 1,344 1,568 158.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
|
| 356 |
+
0.0 2,240 1 2,240.0 2,240.0 2,240 2,240 0.0 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
|
| 357 |
+
0.0 1,216 1 1,216.0 1,216.0 1,216 1,216 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
|
| 358 |
+
0.0 1,024 1 1,024.0 1,024.0 1,024 1,024 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
|
| 359 |
+
0.0 992 1 992.0 992.0 992 992 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
|
| 360 |
+
0.0 896 1 896.0 896.0 896 896 0.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
|
| 361 |
+
|
| 362 |
+
[7/8] Executing 'cuda_gpu_mem_time_sum' stats report
|
| 363 |
+
|
| 364 |
+
Time (%) Total Time (ns) Count Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Operation
|
| 365 |
+
-------- --------------- ----- ----------- ----------- --------- ---------- ----------- ------------------------------
|
| 366 |
+
98.5 483,138,950 2,749 175,750.8 352.0 287 97,773,423 1,996,584.9 [CUDA memcpy Host-to-Device]
|
| 367 |
+
1.1 5,442,908 4 1,360,727.0 1,361,159.0 1,357,511 1,363,079 2,619.2 [CUDA memcpy Device-to-Device]
|
| 368 |
+
0.3 1,591,863 2,738 581.4 448.0 288 2,112 247.2 [CUDA memset]
|
| 369 |
+
0.1 374,720 330 1,135.5 1,120.0 864 1,760 85.7 [CUDA memcpy Device-to-Host]
|
| 370 |
+
|
| 371 |
+
[8/8] Executing 'cuda_gpu_mem_size_sum' stats report
|
| 372 |
+
|
| 373 |
+
Total (MB) Count Avg (MB) Med (MB) Min (MB) Max (MB) StdDev (MB) Operation
|
| 374 |
+
---------- ----- -------- -------- -------- -------- ----------- ------------------------------
|
| 375 |
+
3,090.495 2,749 1.124 0.001 0.000 466.747 10.082 [CUDA memcpy Host-to-Device]
|
| 376 |
+
2,489.319 4 622.330 622.330 622.330 622.330 0.000 [CUDA memcpy Device-to-Device]
|
| 377 |
+
2.029 2,738 0.001 0.001 0.000 0.006 0.001 [CUDA memset]
|
| 378 |
+
0.008 330 0.000 0.000 0.000 0.000 0.000 [CUDA memcpy Device-to-Host]
|
| 379 |
+
|
| 380 |
+
Generated:
|
| 381 |
+
/data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill.nsys-rep
|
| 382 |
+
/data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill.sqlite
|
traverse_bs_util_sim_prefill.nsys-rep
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0b72e5b53980829e09ff4dfb58d9281aff1d6221df1e88e8ca3cf7e1976c1ee9
|
| 3 |
+
size 17266751
|
traverse_bs_util_sim_prefill.py
ADDED
|
@@ -0,0 +1,262 @@
|
| 1 |
+
import os
|
| 2 |
+
import time
|
| 3 |
+
import statistics
|
| 4 |
+
from typing import List, Tuple, Dict
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
import torch.cuda.nvtx as nvtx
|
| 8 |
+
|
| 9 |
+
from vllm import LLM, SamplingParams
|
| 10 |
+
from transformers import AutoTokenizer
|
| 11 |
+
|
| 12 |
+
# ========= Force vLLM V1 =========
|
| 13 |
+
os.environ.setdefault("VLLM_USE_V1", "1")
|
| 14 |
+
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")
|
| 15 |
+
|
| 16 |
+
# Optional: enable V1 metrics logging
|
| 17 |
+
os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")
|
| 18 |
+
|
| 19 |
+
# ========= Try to import the V1 metrics types (compatible across versions) =========
|
| 20 |
+
try:
|
| 21 |
+
from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector # type: ignore
|
| 22 |
+
except Exception:
|
| 23 |
+
Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy placeholders; the class-name fallback checks below still work
|
| 24 |
+
|
| 25 |
+
# ========= Configuration =========
|
| 26 |
+
MODEL_NAME = "Qwen/Qwen2-1.5B"
|
| 27 |
+
DTYPE = "bfloat16"
|
| 28 |
+
TP = 1
|
| 29 |
+
GPU_MEM_UTIL = 0.90
|
| 30 |
+
TRUST_REMOTE_CODE = True
|
| 31 |
+
|
| 32 |
+
# Scenarios: prefill = input tokens, decode = output tokens
|
| 33 |
+
SCENARIOS = [
|
| 34 |
+
{"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
|
| 35 |
+
# {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
|
| 36 |
+
# {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
|
| 37 |
+
]
|
| 38 |
+
|
| 39 |
+
BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
|
| 40 |
+
|
| 41 |
+
SEED = 1234
|
| 42 |
+
TEMPERATURE = 0.0
|
| 43 |
+
TOP_P = 1.0
|
| 44 |
+
WARMUP_PER_BS = 1  # one warmup run per batch size (the warmup block in main() is currently commented out)
|
| 45 |
+
|
| 46 |
+
# ========= Build a prompt with an exact token count =========
|
| 47 |
+
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
|
| 48 |
+
if target_len <= 1:
|
| 49 |
+
# Minimal prompt: a single simple token (an empty string would yield 0 tokens)
|
| 50 |
+
ids = tokenizer("A", add_special_tokens=False)["input_ids"]
|
| 51 |
+
if len(ids) >= 1:
|
| 52 |
+
return tokenizer.decode(ids[:1], skip_special_tokens=True, clean_up_tokenization_spaces=False)
|
| 53 |
+
|
| 54 |
+
base_text = (
|
| 55 |
+
"You are a helpful assistant. "
|
| 56 |
+
"Please analyze the following input and respond succinctly. "
|
| 57 |
+
)
|
| 58 |
+
chunk = " ".join(["data"] * 100) + ". "
|
| 59 |
+
text = base_text + chunk * 200  # text long enough to cover any target length
|
| 60 |
+
|
| 61 |
+
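# Binary-search the character cutoff: the token count of text[:mid] grows
# (roughly monotonically) with mid, so we can home in on exactly target_len.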
lo, hi = 0, len(text)
|
| 62 |
+
target_ids = None
|
| 63 |
+
while lo <= hi:
|
| 64 |
+
mid = (lo + hi) // 2
|
| 65 |
+
ids = tokenizer(text[:mid], add_special_tokens=False)["input_ids"]
|
| 66 |
+
if len(ids) == target_len:
|
| 67 |
+
target_ids = ids
|
| 68 |
+
break
|
| 69 |
+
if len(ids) < target_len:
|
| 70 |
+
lo = mid + 1
|
| 71 |
+
else:
|
| 72 |
+
hi = mid - 1
|
| 73 |
+
|
| 74 |
+
if target_ids is None:
|
| 75 |
+
ids = tokenizer(text[:lo], add_special_tokens=False)["input_ids"]
|
| 76 |
+
if len(ids) > target_len:
|
| 77 |
+
target_ids = ids[:target_len]
|
| 78 |
+
else:
|
| 79 |
+
filler = " data"
|
| 80 |
+
while len(ids) < target_len:
|
| 81 |
+
ids = tokenizer(tokenizer.decode(ids) + filler, add_special_tokens=False)["input_ids"]
|
| 82 |
+
target_ids = ids[:target_len]
|
| 83 |
+
|
| 84 |
+
prompt = tokenizer.decode(target_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
|
| 85 |
+
# Assert the exact token count
|
| 86 |
+
assert len(tokenizer(prompt, add_special_tokens=False)["input_ids"]) == target_len
|
| 87 |
+
return prompt
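# Usage sketch (illustrative only; the benchmark calls this from main()):
#   tok = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
#   p = build_exact_token_prompt(tok, 640)
#   assert len(tok(p, add_special_tokens=False)["input_ids"]) == 640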
|
| 88 |
+
|
| 89 |
+
# ========= V1 metrics extraction helpers =========
|
| 90 |
+
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
|
| 91 |
+
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds" # per-output-token latency
|
| 92 |
+
|
| 93 |
+
def _iter_children_of_vector(vec_obj):
|
| 94 |
+
for attr in ("children", "metrics", "series", "values", "samples", "items"):
|
| 95 |
+
if hasattr(vec_obj, attr):
|
| 96 |
+
val = getattr(vec_obj, attr)
|
| 97 |
+
if isinstance(val, dict):
|
| 98 |
+
for v in val.values():
|
| 99 |
+
yield v
|
| 100 |
+
else:
|
| 101 |
+
try:
|
| 102 |
+
for v in val:
|
| 103 |
+
yield v
|
| 104 |
+
except TypeError:
|
| 105 |
+
pass
|
| 106 |
+
|
| 107 |
+
def _collect_hist_sum_count(metrics, metric_name: str):
|
| 108 |
+
total_sum = 0.0
|
| 109 |
+
total_count = 0.0
|
| 110 |
+
for m in metrics:
|
| 111 |
+
mname = getattr(m, "name", None)
|
| 112 |
+
if mname != metric_name:
|
| 113 |
+
continue
|
| 114 |
+
# Plain Histogram
|
| 115 |
+
if isinstance(m, Histogram) or m.__class__.__name__ == "Histogram":
|
| 116 |
+
total_sum += float(getattr(m, "sum", 0.0))
|
| 117 |
+
total_count += float(getattr(m, "count", 0.0))
|
| 118 |
+
continue
|
| 119 |
+
# Vector[Histogram]
|
| 120 |
+
if isinstance(m, Vector) or m.__class__.__name__ == "Vector":
|
| 121 |
+
for child in _iter_children_of_vector(m):
|
| 122 |
+
if isinstance(child, Histogram) or child.__class__.__name__ == "Histogram":
|
| 123 |
+
total_sum += float(getattr(child, "sum", 0.0))
|
| 124 |
+
total_count += float(getattr(child, "count", 0.0))
|
| 125 |
+
return total_sum, total_count
|
| 126 |
+
|
| 127 |
+
def _metrics_snapshot(llm) -> Dict[str, float]:
|
| 128 |
+
try:
|
| 129 |
+
mets = llm.get_metrics()  # V1: returns a list of Metric objects (Histogram/Vector, etc.)
|
| 130 |
+
except Exception:
|
| 131 |
+
return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
|
| 132 |
+
ttft_sum, ttft_cnt = _collect_hist_sum_count(mets, TTFT_METRIC_NAME)
|
| 133 |
+
tpot_sum, tpot_cnt = _collect_hist_sum_count(mets, TPOT_METRIC_NAME)
|
| 134 |
+
return {"ttft_sum": ttft_sum, "ttft_cnt": ttft_cnt, "tpot_sum": tpot_sum, "tpot_cnt": tpot_cnt}
|
| 135 |
+
|
| 136 |
+
def _metrics_delta(before: dict, after: dict):
|
| 137 |
+
return {
|
| 138 |
+
"ttft_sum": after["ttft_sum"] - before["ttft_sum"],
|
| 139 |
+
"ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
|
| 140 |
+
"tpot_sum": after["tpot_sum"] - before["tpot_sum"],
|
| 141 |
+
"tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
|
| 142 |
+
}
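# Worked example (hypothetical numbers): if one run grows the TTFT histogram by
# ttft_sum=1.2 s over ttft_cnt=8 requests, the mean TTFT is 1.2 / 8 = 0.15 s;
# the decode throughput reported in main() is likewise tpot_cnt / tpot_sum tok/s.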
|
| 143 |
+
|
| 144 |
+
# ========= generate wrapper (the NVTX range is pushed at the call site) =========
|
| 145 |
+
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
|
| 146 |
+
return llm.generate(prompts, params)
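# Variant sketch (not used by main(), which pushes the range around the call
# instead): a wrapper that owns its own NVTX range and pops it even on error.
def nvtx_generate(llm: LLM, prompts: List[str], params: SamplingParams, label: str):
    nvtx.range_push(label)
    try:
        return llm.generate(prompts, params)
    finally:
        nvtx.range_pop()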
|
| 147 |
+
|
| 148 |
+
# ========= Stats formatting =========
|
| 149 |
+
def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
|
| 150 |
+
xs = [v for v in x if (v == v)]  # drop NaN (NaN != NaN)
|
| 151 |
+
if not xs:
|
| 152 |
+
return (float("nan"), float("nan"), float("nan"))
|
| 153 |
+
if len(xs) < 2:
    return (xs[0], xs[0], xs[0])  # statistics.quantiles() needs at least two points
return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1])  # mean, median, p90
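# statistics.quantiles(xs, n=10) returns the nine decile cut points, so the
# last entry is the p90 estimate reported as the third tuple element.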
|
| 154 |
+
|
| 155 |
+
def main():
|
| 156 |
+
print("--- vLLM V1 基准测试(含 NVTX 标记)---")
|
| 157 |
+
print(f"模型: {MODEL_NAME}")
|
| 158 |
+
print(f"批量大小: {BATCH_SIZES}")
|
| 159 |
+
print(f"场景: {[s['name'] for s in SCENARIOS]}")
|
| 160 |
+
print("-" * 60)
|
| 161 |
+
|
| 162 |
+
if not torch.cuda.is_available():
|
| 163 |
+
print("错误:需要 CUDA GPU。")
|
| 164 |
+
return
|
| 165 |
+
|
| 166 |
+
print("加载分词器/模型中...")
|
| 167 |
+
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)
|
| 168 |
+
|
| 169 |
+
# Mark the model-loading phase with an NVTX range
|
| 170 |
+
nvtx.range_push("LLM_init")
|
| 171 |
+
llm = LLM(
|
| 172 |
+
model=MODEL_NAME,
|
| 173 |
+
tensor_parallel_size=TP,
|
| 174 |
+
dtype=DTYPE,
|
| 175 |
+
trust_remote_code=TRUST_REMOTE_CODE,
|
| 176 |
+
gpu_memory_utilization=GPU_MEM_UTIL,
|
| 177 |
+
max_num_seqs=1024,  # large enough to cover this sweep
|
| 178 |
+
max_model_len=4096,
|
| 179 |
+
disable_log_stats=False,  # enable V1 metrics collection
|
| 180 |
+
)
|
| 181 |
+
nvtx.range_pop()
|
| 182 |
+
print("模型加载完成。")
|
| 183 |
+
|
| 184 |
+
for sc in SCENARIOS:
|
| 185 |
+
name = sc["name"]
|
| 186 |
+
prompt_tokens = sc["prompt_tokens"]
|
| 187 |
+
max_new_tokens = sc["max_new_tokens"]
|
| 188 |
+
|
| 189 |
+
print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")
|
| 190 |
+
|
| 191 |
+
# Build the exact-length prompt
|
| 192 |
+
prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)
|
| 193 |
+
|
| 194 |
+
# Sampling parameters (greedy decoding)
|
| 195 |
+
sampling_params = SamplingParams(
|
| 196 |
+
max_tokens=max_new_tokens,
|
| 197 |
+
temperature=TEMPERATURE,
|
| 198 |
+
top_p=TOP_P,
|
| 199 |
+
seed=SEED,
|
| 200 |
+
n=1,
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
# Record results per batch size (convenient for later stats or external parsing)
|
| 204 |
+
for bs in BATCH_SIZES:
|
| 205 |
+
print(f"\n--- 批量大小 bs={bs} ---")
|
| 206 |
+
|
| 207 |
+
prompts = [prompt_text] * bs
|
| 208 |
+
|
| 209 |
+
# Warmup (currently disabled)
|
| 210 |
+
# print("预热中...")
|
| 211 |
+
# nvtx.range_push(f"WARMUP [{name}] bs={bs}")
|
| 212 |
+
# _ = decorated_generate(llm, [prompts[0]], sampling_params)
|
| 213 |
+
# torch.cuda.synchronize()
|
| 214 |
+
# nvtx.range_pop()
|
| 215 |
+
|
| 216 |
+
# Timed run plus V1 metrics snapshots
|
| 217 |
+
# nvtx.range_push(f"RUN [{name}] bs={bs}")
|
| 218 |
+
torch.cuda.synchronize()
|
| 219 |
+
snap_before = _metrics_snapshot(llm)
|
| 220 |
+
t0 = time.perf_counter()
|
| 221 |
+
|
| 222 |
+
nvtx.range_push(f"generate [{name}] bs={bs}")
|
| 223 |
+
outputs = decorated_generate(llm, prompts, sampling_params)
|
| 224 |
+
nvtx.range_pop() # generate
|
| 225 |
+
|
| 226 |
+
torch.cuda.synchronize()
|
| 227 |
+
t1 = time.perf_counter()
|
| 228 |
+
snap_after = _metrics_snapshot(llm)
|
| 229 |
+
# nvtx.range_pop() # RUN
|
| 230 |
+
|
| 231 |
+
duration = t1 - t0
|
| 232 |
+
|
| 233 |
+
# Token counts and throughput
|
| 234 |
+
total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
|
| 235 |
+
avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
|
| 236 |
+
throughput = total_output_tokens / duration if duration > 0 else float("inf")
|
| 237 |
+
|
| 238 |
+
# Derive V1 TTFT / decode throughput from the metric deltas
|
| 239 |
+
delta = _metrics_delta(snap_before, snap_after)
|
| 240 |
+
if delta["ttft_cnt"] > 0:
|
| 241 |
+
ttft = delta["ttft_sum"] / delta["ttft_cnt"]
|
| 242 |
+
else:
|
| 243 |
+
ttft = float("nan")
|
| 244 |
+
|
| 245 |
+
if delta["tpot_cnt"] > 0:
|
| 246 |
+
avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"] # seconds/token
|
| 247 |
+
decode_tps = 1.0 / avg_tpot
|
| 248 |
+
else:
|
| 249 |
+
decode_tps = float("nan")
|
| 250 |
+
|
| 251 |
+
print(f"执行时间: {duration:.4f} s")
|
| 252 |
+
print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
|
| 253 |
+
print(f"生成总 tokens: {total_output_tokens}")
|
| 254 |
+
print(f"吞吐(生成tokens/秒): {throughput:.2f}")
|
| 255 |
+
print(f"TTFT (V1 metrics): {ttft:.4f} s")
|
| 256 |
+
print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")
|
| 257 |
+
|
| 258 |
+
print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
|
| 259 |
+
|
| 260 |
+
if __name__ == "__main__":
|
| 261 |
+
print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
|
| 262 |
+
main()
|
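# Post-run inspection (sketch; assumes a recent Nsight Systems CLI on PATH):
# the NVTX summary table shown in the accompanying .log can be regenerated with
#   nsys stats --report nvtx_sum traverse_bs_util_sim_prefill.nsys-rep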