Hamerlate committed on
Commit
b8fdc8d
·
verified ·
1 Parent(s): c3680e4

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -67,3 +67,8 @@ sim_traverse_bs/traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -tex
67
  traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
68
  traverse_bs_util_sim_decoding.nsys-rep filter=lfs diff=lfs merge=lfs -text
69
  traverse_bs_util_sim_prefill.nsys-rep filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
67
  traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
68
  traverse_bs_util_sim_decoding.nsys-rep filter=lfs diff=lfs merge=lfs -text
69
  traverse_bs_util_sim_prefill.nsys-rep filter=lfs diff=lfs merge=lfs -text
70
+ sim_traverse_bs/traverse_bs_util_sim_decoding.nsys-rep filter=lfs diff=lfs merge=lfs -text
71
+ sim_traverse_bs/traverse_bs_util_sim_decoding_1024.nsys-rep filter=lfs diff=lfs merge=lfs -text
72
+ sim_traverse_bs/traverse_bs_util_sim_prefill.nsys-rep filter=lfs diff=lfs merge=lfs -text
73
+ sim_traverse_bs/traverse_bs_util_sim_prefill_1152.nsys-rep filter=lfs diff=lfs merge=lfs -text
74
+ std_traverse_bs/traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
bench_vllm_v1.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import time
import statistics
from typing import List, Tuple

from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

# ==== Key: V1 engine switch (can also be exported in the shell) ====
os.environ.setdefault("VLLM_USE_V1", "1")

# Optional: multiprocess safety ("spawn" is the officially recommended method)
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

# V1 metric types (Counter/Gauge/Histogram/Vector)
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
except Exception:
    # Fallback for older vLLM versions: if the import fails, downstream code
    # relies on hasattr/class-name reflection to stay compatible as best it can.
    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy

# =============== Tunable parameters ===============
TP = 1                   # tensor-parallel size
DTYPE = "bfloat16"       # model weight/activation dtype
GPU_MEM_UTIL = 0.9       # fraction of GPU memory vLLM may claim
WARMUP = 2               # untimed generate() calls before measuring
RUNS = 5                 # timed runs per model
INPUT_TOKENS = 1024      # exact prompt length (in tokens)
OUTPUT_TOKENS = 128      # max tokens generated per request
SEED = 1234              # sampling seed (greedy anyway at T=0)
TEMPERATURE = 0.0        # 0.0 => deterministic decoding
MODELS = [
    # "meta-llama/Meta-Llama-3-70B",
    # "meta-llama/Meta-Llama-3-8B",
    "Qwen/Qwen2-1.5B",
]
# =====================================
38
+
39
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Construct a prompt that tokenizes to exactly *target_len* tokens.

    Binary-searches a character-prefix length of a long synthetic text for a
    prefix whose tokenization hits the target exactly; if none exists, the
    token sequence is padded or truncated at the token level. The returned
    prompt is re-tokenized and checked before being handed back.
    """
    def encode(s):
        # Tokenize without special tokens so lengths are exact.
        return tokenizer(s, add_special_tokens=False)["input_ids"]

    header = "You are a helpful assistant. " \
        "Please analyze the following input and respond succinctly. "
    filler_block = " ".join(["data"] * 100) + ". "
    corpus = header + filler_block * 200

    # Binary search over the character-prefix length.
    low, high = 0, len(corpus)
    exact_ids = None
    while low <= high:
        cut = (low + high) // 2
        candidate = the_ids = encode(corpus[:cut])
        count = len(the_ids)
        if count == target_len:
            exact_ids = candidate
            break
        elif count < target_len:
            low = cut + 1
        else:
            high = cut - 1

    if exact_ids is None:
        # No prefix tokenized to the exact length; adjust at the token level.
        candidate = encode(corpus[:low])
        if len(candidate) <= target_len:
            # Grow by re-encoding with extra filler until long enough.
            while len(candidate) < target_len:
                candidate = encode(tokenizer.decode(candidate) + " data")
        exact_ids = candidate[:target_len]

    prompt = tokenizer.decode(exact_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    assert len(encode(prompt)) == target_len
    return prompt
71
+
72
# --------- V1 metrics extraction helpers ---------
# Prometheus histogram names exposed by the vLLM V1 engine.
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency
75
+
76
+ def _iter_children_of_vector(vec_obj):
77
+ """
78
+ 兼容不同版本 Vector 的内部字段名(children/metrics/series/...)。
79
+ """
80
+ for attr in ("children", "metrics", "series", "values", "samples", "items"):
81
+ if hasattr(vec_obj, attr):
82
+ val = getattr(vec_obj, attr)
83
+ if isinstance(val, dict):
84
+ for v in val.values():
85
+ yield v
86
+ else:
87
+ try:
88
+ for v in val:
89
+ yield v
90
+ except TypeError:
91
+ pass
92
+
93
def _collect_hist_sum_count(metrics, metric_name: str):
    """Aggregate the (sum, count) of every Histogram named *metric_name*.

    Handles both a bare (possibly labelled) Histogram and a
    Vector[Histogram] family, accumulating across all label children.
    """
    def looks_like_hist(obj):
        return isinstance(obj, Histogram) or obj.__class__.__name__ == "Histogram"

    def looks_like_vec(obj):
        return isinstance(obj, Vector) or obj.__class__.__name__ == "Vector"

    acc_sum = 0.0
    acc_count = 0.0
    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue

        # Plain Histogram.
        if looks_like_hist(metric):
            acc_sum += float(getattr(metric, "sum", 0.0))
            acc_count += float(getattr(metric, "count", 0.0))
        # Vector[Histogram].
        elif looks_like_vec(metric):
            for child in _iter_children_of_vector(metric):
                if looks_like_hist(child):
                    acc_sum += float(getattr(child, "sum", 0.0))
                    acc_count += float(getattr(child, "count", 0.0))
    return acc_sum, acc_count
118
+
119
def _metrics_snapshot(llm):
    """Take one metrics snapshot and reduce it to TTFT/TPOT (sum, count)."""
    # V1: get_metrics() returns a list of Metric objects (Histogram/Vector/...).
    metric_list = llm.get_metrics()
    snapshot = {}
    snapshot["ttft_sum"], snapshot["ttft_cnt"] = _collect_hist_sum_count(
        metric_list, TTFT_METRIC_NAME)
    snapshot["tpot_sum"], snapshot["tpot_cnt"] = _collect_hist_sum_count(
        metric_list, TPOT_METRIC_NAME)
    return snapshot
128
+
129
+ def _metrics_delta(before: dict, after: dict):
130
+ """计算 after - before 的增量。"""
131
+ return {
132
+ "ttft_sum": after["ttft_sum"] - before["ttft_sum"],
133
+ "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
134
+ "tpot_sum": after["tpot_sum"] - before["tpot_sum"],
135
+ "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
136
+ }
137
+
138
def measure_one_model(model_name: str) -> None:
    """Benchmark one model with vLLM V1.

    Builds an exact-length prompt, runs WARMUP untimed then RUNS timed
    generate() calls, and reports per-run and summary statistics:
    raw wall-clock latency/throughput plus TTFT and decode tokens/s derived
    from the V1 Prometheus histogram deltas around each request.
    """
    print(f"\n===== Testing model: {model_name} | TP={TP} | dtype={DTYPE} (V1 metrics) =====")
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True, trust_remote_code=True)
    prompt = build_exact_token_prompt(tokenizer, INPUT_TOKENS)

    llm = LLM(
        model=model_name,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=True,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=1,
        max_model_len=8192,
        disable_log_stats=False,  # keep stats enabled: get_metrics() is read below
    )

    sampling_params = SamplingParams(
        max_tokens=OUTPUT_TOKENS,
        temperature=TEMPERATURE,
        top_p=1.0,
        seed=SEED,
        n=1,
        stop=None
    )

    # Warm-up runs (not timed).
    for _ in range(WARMUP):
        _ = llm.generate([prompt], sampling_params)

    # Accumulators for per-run statistics.
    latencies: List[float] = []
    out_token_speeds: List[float] = []
    total_token_speeds: List[float] = []
    ttft_list: List[float] = []        # TTFT (seconds) read from V1 metrics
    decode_tps_list: List[float] = []  # decode tokens/s read from V1 metrics

    for i in range(RUNS):
        # Snapshot metrics before the request.
        snap_before = _metrics_snapshot(llm)

        # Issue the request and wall-clock it (for comparison with metrics).
        t0 = time.perf_counter()
        outputs = llm.generate([prompt], sampling_params)
        t1 = time.perf_counter()
        dur = t1 - t0

        # Snapshot metrics after the request; the delta covers just this run.
        snap_after = _metrics_snapshot(llm)
        delta = _metrics_delta(snap_before, snap_after)

        # Derive this run's TTFT and decode tokens/s from the histogram deltas.
        # avg_ttft = sum_delta / count_delta
        if delta["ttft_cnt"] > 0:
            ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            ttft_list.append(ttft)
        else:
            ttft_list.append(float("nan"))

        # avg_time_per_output_token = sum_delta / count_delta
        if delta["tpot_cnt"] > 0:
            avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds / token
            decode_tokens_per_s = 1.0 / avg_tpot
            decode_tps_list.append(decode_tokens_per_s)
        else:
            decode_tps_list.append(float("nan"))

        # Raw throughput figures, same methodology as the original script
        # (for comparison against the metrics-derived numbers).
        text_out = outputs[0].outputs[0].text
        out_token_cnt = len(tokenizer(text_out, add_special_tokens=False)["input_ids"])
        latencies.append(dur)
        out_token_speeds.append(out_token_cnt / dur if dur > 0 else float("inf"))
        total_token_speeds.append((INPUT_TOKENS + out_token_cnt) / dur if dur > 0 else float("inf"))

        print(
            f"Run {i+1}/{RUNS}: latency={dur:.3f}s, "
            f"gen_tokens={out_token_cnt}, "
            f"gen_toks/s(raw)={out_token_speeds[-1]:.2f}, "
            f"TTFT(V1)={ttft_list[-1]:.3f}s, "
            f"decode_toks/s(V1)={decode_tps_list[-1]:.2f}"
        )

    def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
        """Return (mean, median, ~p90) of x, ignoring NaN entries."""
        xs = [v for v in x if (v == v)]  # drop NaN (NaN != NaN)
        if not xs:
            return (float("nan"), float("nan"), float("nan"))
        # NOTE(review): statistics.quantiles requires >= 2 data points; with
        # RUNS >= 2 this holds unless all but one entry is NaN — confirm.
        return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1])  # p90

    lat_mean, lat_med, lat_p90 = fmt_stats(latencies)
    gen_mean, gen_med, gen_p90 = fmt_stats(out_token_speeds)
    tot_mean, tot_med, tot_p90 = fmt_stats(total_token_speeds)
    ttft_mean, ttft_med, ttft_p90 = fmt_stats(ttft_list)
    dtps_mean, dtps_med, dtps_p90 = fmt_stats(decode_tps_list)

    print("\n--- Summary ---")
    print(f"Latency (s) : mean={lat_mean:.3f}, median={lat_med:.3f}, p90={lat_p90:.3f}")
    print(f"Gen tok/s (raw) : mean={gen_mean:.2f}, median={gen_med:.2f}, p90={gen_p90:.2f}")
    print(f"Total tok/s (raw) : mean={tot_mean:.2f}, median={tot_med:.2f}, p90={tot_p90:.2f}")
    print(f"TTFT (V1 metrics, s): mean={ttft_mean:.3f}, median={ttft_med:.3f}, p90={ttft_p90:.3f}")
    print(f"Decode tok/s (V1) : mean={dtps_mean:.2f}, median={dtps_med:.2f}, p90={dtps_p90:.2f}")
    print("(V1 的 TTFT/Decode tok/s 来自 Prometheus 直方图指标;raw 为你原先用时长推算的数值)")
238
+
239
if __name__ == "__main__":
    # Entry point: report GPU visibility, then benchmark each model in turn.
    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
    for m in MODELS:
        measure_one_model(m)
sim_traverse_bs/kv_cache_vs_util_gb.pdf ADDED
Binary file (19.9 kB). View file
 
sim_traverse_bs/kv_cache_vs_util_gb.png ADDED

Git LFS Details

  • SHA256: 7dfdf8854aa0ddf4469bc84306e7dc2f65b920df7d97b157ed2e36bd931dad82
  • Pointer size: 131 Bytes
  • Size of remote file: 279 kB
sim_traverse_bs/plot.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import matplotlib.pyplot as plt
3
+
4
def kv_cache_size_bytes(bs, seq, *, n_layers=28, n_kv_heads=2,
                        hidden_size=1536, num_attention_heads=12,
                        bytes_per_elem=2):
    """Return the KV-cache size in bytes for a decoder-only transformer.

    Defaults match the Qwen2ForCausalLM (Qwen2-1.5B) configuration in
    bfloat16 (2 bytes/element); pass keyword overrides for other models.
    Accepts scalars or NumPy arrays for *bs*/*seq* (broadcasts elementwise).

    Args:
        bs: batch size (scalar or array).
        seq: sequence length in tokens (scalar or array).
        n_layers: number of transformer layers.
        n_kv_heads: number of key/value heads (GQA).
        hidden_size: model hidden dimension.
        num_attention_heads: number of query heads (determines head_dim).
        bytes_per_elem: bytes per cache element (2 for bf16/fp16).
    """
    head_dim = hidden_size // num_attention_heads
    # Factor of 2 accounts for storing both the K and the V tensor.
    return bs * seq * n_kv_heads * head_dim * 2 * n_layers * bytes_per_elem
13
+
14
# ===== Prefill data =====
# Timings are average NVTX range durations (nanoseconds) from nsys reports,
# one entry per batch size in bs_* order.
# seq=640
avg_ns_prefill_640 = np.array([
    1418534991, 1033939782, 494241378, 252646178, 135127951,
    69450854, 42089250, 35031755, 28423503, 22718985, 15558545
], dtype=np.float64)
bs_prefill_640 = np.array([1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1])
seq_prefill_640 = 640
# Throughput in tokens/s = tokens processed / wall time.
throughput_prefill_640 = bs_prefill_640 * seq_prefill_640 / (avg_ns_prefill_640 * 1e-9)
norm_prefill_640 = throughput_prefill_640 / throughput_prefill_640.max()
kv_prefill_640 = kv_cache_size_bytes(bs_prefill_640, seq_prefill_640) / 1024**3  # bytes -> GiB

# seq=1152 (bs=1024 run dropped; see commented-out full series below)
# avg_ns_prefill_1152 = np.array([
#     2423366261, 1417657581, 1040325509, 389808438, 200051514,
#     110723882, 61663721, 39934013, 30382401, 21185162, 15818426
# ], dtype=np.float64)
# bs_prefill_1152 = np.array([1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1])
avg_ns_prefill_1152 = np.array([
    1417657581, 1040325509, 389808438, 200051514,
    110723882, 61663721, 39934013, 30382401, 21185162, 15818426
], dtype=np.float64)
bs_prefill_1152 = np.array([ 512, 256, 128, 64, 32, 16, 8, 4, 2, 1])
seq_prefill_1152 = 1152
throughput_prefill_1152 = bs_prefill_1152 * seq_prefill_1152 / (avg_ns_prefill_1152 * 1e-9)
norm_prefill_1152 = throughput_prefill_1152 / throughput_prefill_1152.max()
kv_prefill_1152 = kv_cache_size_bytes(bs_prefill_1152, seq_prefill_1152) / 1024**3  # bytes -> GiB

# ===== Decoding data =====
# seq=512
avg_ns_decoding_512 = np.array([
    25285906551, 12679311252, 7440608085, 4841914697, 4272889441,
    3997075015, 3825710172, 3726603655, 3648294896, 3635960724, 3319210677
], dtype=np.float64)
# NOTE(review): the [..., 8, 2, 4, 1] tail mirrors the nsys nvtx table, which
# is sorted by time and had bs=2 slower than bs=4 — presumably intentional so
# each bs pairs with its timing; the x-values are then slightly out of order
# in the line plot. Verify against the log.
bs_decoding_512 = np.array([1024, 512, 256, 128, 64, 32, 16, 8, 2, 4, 1])
seq_decoding_512 = 512
throughput_decoding_512 = bs_decoding_512 * seq_decoding_512 / (avg_ns_decoding_512 * 1e-9)
norm_decoding_512 = throughput_decoding_512 / throughput_decoding_512.max()
kv_decoding_512 = kv_cache_size_bytes(bs_decoding_512, seq_decoding_512) / 1024**3  # bytes -> GiB

# seq=1024 (bs=1024 run dropped; see commented-out full series below)
# avg_ns_decoding_1024 = np.array([
#     65019709544, 30598009745, 16552100314, 11165129518, 8835508288,
#     8037503827, 8020861613, 7502439278, 7275415153, 7204870191, 6423331403
# ], dtype=np.float64)
# bs_decoding_1024 = np.array([1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1])
avg_ns_decoding_1024 = np.array([
    30598009745, 16552100314, 11165129518, 8835508288,
    8037503827, 8020861613, 7502439278, 7275415153, 7204870191, 6423331403
], dtype=np.float64)
bs_decoding_1024 = np.array([ 512, 256, 128, 64, 32, 16, 8, 4, 2, 1])
seq_decoding_1024 = 1024
throughput_decoding_1024 = bs_decoding_1024 * seq_decoding_1024 / (avg_ns_decoding_1024 * 1e-9)
norm_decoding_1024 = throughput_decoding_1024 / throughput_decoding_1024.max()
kv_decoding_1024 = kv_cache_size_bytes(bs_decoding_1024, seq_decoding_1024) / 1024**3  # bytes -> GiB

# ===== Plotting =====
fig, axes = plt.subplots(1, 2, figsize=(14, 5))

# Prefill subplot
axes[0].plot(kv_prefill_640, norm_prefill_640, marker='o', label="seq=640")
axes[0].plot(kv_prefill_1152, norm_prefill_1152, marker='s', label="seq=1152")
axes[0].set_xscale('log')
axes[0].set_xlabel("KV Cache Size (GB, log scale)")
axes[0].set_ylabel("Normalized GPU Utilization")
axes[0].set_title("Prefill")
axes[0].grid(True, which="both", ls="--", alpha=0.5)
axes[0].legend()

# Decoding subplot
axes[1].plot(kv_decoding_512, norm_decoding_512, marker='o', label="seq=512")
axes[1].plot(kv_decoding_1024, norm_decoding_1024, marker='s', label="seq=1024")
axes[1].set_xscale('log')
axes[1].set_xlabel("KV Cache Size (GB, log scale)")
axes[1].set_ylabel("Normalized GPU Utilization")
axes[1].set_title("Decoding")
axes[1].grid(True, which="both", ls="--", alpha=0.5)
axes[1].legend()

plt.suptitle("Normalized GPU Utilization vs KV Cache Size")
plt.savefig("kv_cache_vs_util_gb.png", dpi=300, bbox_inches='tight')
plt.savefig("kv_cache_vs_util_gb.pdf", dpi=300, bbox_inches='tight')
plt.show()
sim_traverse_bs/traverse_bs_util_sim_decoding.log ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ WARNING: CPU IP/backtrace sampling not supported, disabling.
2
+ Try the 'nsys status --environment' command to learn more.
3
+
4
+ WARNING: CPU context switch tracing not supported, disabling.
5
+ Try the 'nsys status --environment' command to learn more.
6
+
7
+ INFO 08-13 19:21:37 [__init__.py:235] Automatically detected platform cuda.
8
+ CUDA_VISIBLE_DEVICES = 3
9
+ --- vLLM V1 基准测试(含 NVTX 标记)---
10
+ 模型: Qwen/Qwen2-1.5B
11
+ 批量大小: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
12
+ 场景: ['prefill1_decode512']
13
+ ------------------------------------------------------------
14
+ 加载分词器/模型中...
15
+ INFO 08-13 19:21:46 [config.py:1604] Using max model len 4096
16
+ INFO 08-13 19:21:47 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
17
+ INFO 08-13 19:21:52 [__init__.py:235] Automatically detected platform cuda.
18
+ INFO 08-13 19:21:54 [core.py:572] Waiting for init message from front-end.
19
+ INFO 08-13 19:21:54 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
20
+ INFO 08-13 19:21:56 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
21
+ WARNING 08-13 19:21:56 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
22
+ INFO 08-13 19:21:56 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
23
+ INFO 08-13 19:21:56 [gpu_model_runner.py:1875] Loading model from scratch...
24
+ INFO 08-13 19:21:56 [cuda.py:290] Using Flash Attention backend on V1 engine.
25
+ INFO 08-13 19:21:57 [weight_utils.py:296] Using model weights format ['*.safetensors']
26
+ INFO 08-13 19:21:57 [weight_utils.py:349] No model.safetensors.index.json found in remote.
27
+
28
+
29
+
30
+
31
+ INFO 08-13 19:21:58 [default_loader.py:262] Loading weights took 0.63 seconds
32
+ INFO 08-13 19:21:58 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 1.878581 seconds
33
+ INFO 08-13 19:22:04 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
34
+ INFO 08-13 19:22:04 [backends.py:541] Dynamo bytecode transform time: 5.72 s
35
+ INFO 08-13 19:22:09 [backends.py:161] Directly load the compiled graph(s) for dynamic shape from the cache, took 4.036 s
36
+ INFO 08-13 19:22:10 [monitor.py:34] torch.compile takes 5.72 s in total
37
+ INFO 08-13 19:22:11 [gpu_worker.py:255] Available KV cache memory: 12.81 GiB
38
+ INFO 08-13 19:22:11 [kv_cache_utils.py:833] GPU KV cache size: 479,536 tokens
39
+ INFO 08-13 19:22:11 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.07x
40
+
41
+ INFO 08-13 19:22:13 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
42
+ INFO 08-13 19:22:13 [core.py:193] init engine (profile, create kv cache, warmup model) took 14.62 seconds
43
+ 模型加载完成。
44
+
45
+ ===== 场景:prefill1_decode512 | prefill=1, decode=512 =====
46
+
47
+ --- 批量大小 bs=1 ---
48
+
49
+
50
+ 执行时间: 3.3194 s
51
+ 实际平均输入 tokens: 1.00(目标 1)
52
+ 生成总 tokens: 512
53
+ 吞吐(生成tokens/秒): 154.24
54
+ TTFT (V1 metrics): 0.0190 s
55
+ 解码吞吐 (V1 metrics): 155.07 tok/s
56
+
57
+ --- 批量大小 bs=2 ---
58
+
59
+
60
+ 执行时间: 3.6484 s
61
+ 实际平均输入 tokens: 1.00(目标 1)
62
+ 生成总 tokens: 1024
63
+ 吞吐(生成tokens/秒): 280.67
64
+ TTFT (V1 metrics): 0.0120 s
65
+ 解码吞吐 (V1 metrics): 140.73 tok/s
66
+
67
+ --- 批量大小 bs=4 ---
68
+
69
+
70
+ 执行时间: 3.6361 s
71
+ 实际平均输入 tokens: 1.00(目标 1)
72
+ 生成总 tokens: 2048
73
+ 吞吐(生成tokens/秒): 563.24
74
+ TTFT (V1 metrics): 0.0140 s
75
+ 解码吞吐 (V1 metrics): 141.25 tok/s
76
+
77
+ --- 批量大小 bs=8 ---
78
+
79
+
80
+ 执行时间: 3.7267 s
81
+ 实际平均输入 tokens: 1.00(目标 1)
82
+ 生成总 tokens: 4096
83
+ 吞吐(生成tokens/秒): 1099.08
84
+ TTFT (V1 metrics): 0.0149 s
85
+ 解码吞吐 (V1 metrics): 137.87 tok/s
86
+
87
+ --- 批量大小 bs=16 ---
88
+
89
+
90
+ 执行时间: 3.8260 s
91
+ 实际平均输入 tokens: 1.00(目标 1)
92
+ 生成总 tokens: 8192
93
+ 吞吐(生成tokens/秒): 2141.13
94
+ TTFT (V1 metrics): 0.0136 s
95
+ 解码吞吐 (V1 metrics): 134.33 tok/s
96
+
97
+ --- 批量大小 bs=32 ---
98
+
99
+
100
+ 执行时间: 3.9972 s
101
+ 实际平均输入 tokens: 1.00(目标 1)
102
+ 生成总 tokens: 16384
103
+ 吞吐(生成tokens/秒): 4098.85
104
+ TTFT (V1 metrics): 0.0164 s
105
+ 解码吞吐 (V1 metrics): 128.92 tok/s
106
+
107
+ --- 批量大小 bs=64 ---
108
+
109
+
110
+ 执行时间: 4.2731 s
111
+ 实际平均输入 tokens: 1.00(目标 1)
112
+ 生成总 tokens: 32768
113
+ 吞吐(生成tokens/秒): 7668.51
114
+ TTFT (V1 metrics): 0.0198 s
115
+ 解码吞吐 (V1 metrics): 120.83 tok/s
116
+
117
+ --- 批量大小 bs=128 ---
118
+
119
+
120
+ 执行时间: 4.8421 s
121
+ 实际平均输入 tokens: 1.00(目标 1)
122
+ 生成总 tokens: 65536
123
+ 吞吐(生成tokens/秒): 13534.75
124
+ TTFT (V1 metrics): 0.0316 s
125
+ 解码吞吐 (V1 metrics): 107.35 tok/s
126
+
127
+ --- 批量大小 bs=256 ---
128
+
129
+
130
+ 执行时间: 7.4408 s
131
+ 实际平均输入 tokens: 1.00(目标 1)
132
+ 生成总 tokens: 131072
133
+ 吞吐(生成tokens/秒): 17615.41
134
+ TTFT (V1 metrics): 0.0433 s
135
+ 解码吞吐 (V1 metrics): 69.70 tok/s
136
+
137
+ --- 批量大小 bs=512 ---
138
+
139
+
140
+ 执行时间: 12.6794 s
141
+ 实际平均输入 tokens: 1.00(目标 1)
142
+ 生成总 tokens: 262144
143
+ 吞吐(生成tokens/秒): 20674.72
144
+ TTFT (V1 metrics): 0.1809 s
145
+ 解码吞吐 (V1 metrics): 42.38 tok/s
146
+
147
+ --- 批量大小 bs=1024 ---
148
+
149
+
150
+ [rank0]:[W813 19:23:32.135663883 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
151
+ 执行时间: 25.2865 s
152
+ 实际平均输入 tokens: 1.00(目标 1)
153
+ 生成总 tokens: 524288
154
+ 吞吐(生成tokens/秒): 20733.89
155
+ TTFT (V1 metrics): 0.1191 s
156
+ 解码吞吐 (V1 metrics): 20.77 tok/s
157
+
158
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
159
+ GPU 3: General Metrics for NVIDIA AD10x (any frequency)
160
+ Generating '/tmp/nsys-report-bd68.qdstrm'
161
+
162
+
163
+ [3/8] Executing 'nvtx_sum' stats report
164
+
165
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Style Range
166
+ -------- --------------- --------- ---------------- ---------------- -------------- -------------- ----------- ------- --------------------------------------
167
+ 31.5 35,202,819,763 1 35,202,819,763.0 35,202,819,763.0 35,202,819,763 35,202,819,763 0.0 PushPop :LLM_init
168
+ 22.6 25,285,906,551 1 25,285,906,551.0 25,285,906,551.0 25,285,906,551 25,285,906,551 0.0 PushPop :generate [prefill1_decode512] bs=1024
169
+ 11.3 12,679,311,252 1 12,679,311,252.0 12,679,311,252.0 12,679,311,252 12,679,311,252 0.0 PushPop :generate [prefill1_decode512] bs=512
170
+ 6.7 7,440,608,085 1 7,440,608,085.0 7,440,608,085.0 7,440,608,085 7,440,608,085 0.0 PushPop :generate [prefill1_decode512] bs=256
171
+ 4.3 4,841,914,697 1 4,841,914,697.0 4,841,914,697.0 4,841,914,697 4,841,914,697 0.0 PushPop :generate [prefill1_decode512] bs=128
172
+ 3.8 4,272,889,441 1 4,272,889,441.0 4,272,889,441.0 4,272,889,441 4,272,889,441 0.0 PushPop :generate [prefill1_decode512] bs=64
173
+ 3.6 3,997,075,015 1 3,997,075,015.0 3,997,075,015.0 3,997,075,015 3,997,075,015 0.0 PushPop :generate [prefill1_decode512] bs=32
174
+ 3.4 3,825,710,172 1 3,825,710,172.0 3,825,710,172.0 3,825,710,172 3,825,710,172 0.0 PushPop :generate [prefill1_decode512] bs=16
175
+ 3.3 3,726,603,655 1 3,726,603,655.0 3,726,603,655.0 3,726,603,655 3,726,603,655 0.0 PushPop :generate [prefill1_decode512] bs=8
176
+ 3.3 3,648,294,896 1 3,648,294,896.0 3,648,294,896.0 3,648,294,896 3,648,294,896 0.0 PushPop :generate [prefill1_decode512] bs=2
177
+ 3.2 3,635,960,724 1 3,635,960,724.0 3,635,960,724.0 3,635,960,724 3,635,960,724 0.0 PushPop :generate [prefill1_decode512] bs=4
178
+ 3.0 3,319,210,677 1 3,319,210,677.0 3,319,210,677.0 3,319,210,677 3,319,210,677 0.0 PushPop :generate [prefill1_decode512] bs=1
179
+ 0.0 90,630 2 45,315.0 45,315.0 41,468 49,162 5,440.5 PushPop CCCL:cub::DeviceSegmentedRadixSort
180
+
181
+ [4/8] Executing 'osrt_sum' stats report
182
+
183
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
184
+ -------- ----------------- --------- --------------- ---------------- --------- -------------- ---------------- ----------------------
185
+ 29.7 1,284,023,267,442 49,709 25,830,800.6 28,820.0 1,000 96,534,138,823 969,651,617.8 pthread_cond_timedwait
186
+ 24.2 1,045,773,118,476 73,135 14,299,215.4 10,062,843.0 1,010 81,708,518,315 463,992,514.8 epoll_wait
187
+ 23.9 1,031,963,983,489 549 1,879,715,816.9 15,827.0 1,644 96,535,575,076 13,212,126,179.1 pthread_cond_wait
188
+ 8.3 357,289,068,553 57 6,268,229,272.9 10,000,073,611.0 10,419 10,000,146,360 4,730,020,985.8 sem_timedwait
189
+ 8.2 355,562,427,280 39,343 9,037,501.6 1,512.0 1,000 12,219,368,064 127,255,975.8 poll
190
+ 3.0 131,715,516,186 11,467 11,486,484.4 7,170,376.0 28,456 585,263,318 14,022,810.8 sem_wait
191
+ 2.6 112,179,081,690 41,286 2,717,121.6 2,213.0 1,000 94,667,308,194 468,888,772.5 read
192
+ 0.0 793,423,700 330 2,404,314.2 1,354,549.5 1,900 18,406,079 2,639,127.9 pthread_rwlock_wrlock
193
+ 0.0 494,035,258 199,029 2,482.2 1,380.0 1,000 72,123,224 161,811.1 munmap
194
+ 0.0 298,099,876 8,608 34,630.6 10,089.5 1,002 29,694,619 390,579.6 ioctl
195
+ 0.0 220,903,685 369 598,655.0 2,510.0 1,159 22,536,588 3,264,985.3 fopen
196
+ 0.0 121,576,605 24 5,065,691.9 5,064,718.5 5,053,737 5,087,479 7,589.0 nanosleep
197
+ 0.0 110,429,690 30,645 3,603.5 2,536.0 1,000 19,587,041 111,890.3 open64
198
+ 0.0 88,325,670 79 1,118,046.5 3,103.0 1,011 81,521,546 9,166,201.0 waitpid
199
+ 0.0 76,471,088 18,154 4,212.4 3,660.0 1,000 1,659,563 15,417.3 mmap64
200
+ 0.0 74,586,704 96 776,944.8 3,874.0 1,020 19,635,272 3,727,062.1 open
201
+ 0.0 72,521,171 8,897 8,151.2 4,707.0 1,022 2,826,985 34,660.7 recv
202
+ 0.0 71,841,066 8,895 8,076.6 5,429.0 1,571 84,986 7,239.5 send
203
+ 0.0 69,801,955 41,067 1,699.7 1,627.0 1,000 32,060 794.7 pthread_cond_signal
204
+ 0.0 67,085,379 39 1,720,137.9 470,979.0 3,544 10,373,042 3,340,509.2 pthread_join
205
+ 0.0 56,617,564 10 5,661,756.4 18,705.5 8,315 56,388,994 17,823,747.2 connect
206
+ 0.0 51,207,160 14,809 3,457.8 2,380.0 1,013 139,725 5,733.7 write
207
+ 0.0 40,211,592 4,773 8,424.8 6,319.0 1,000 661,710 13,119.1 pthread_mutex_lock
208
+ 0.0 16,225,859 10,123 1,602.9 1,387.0 1,000 17,416 730.6 epoll_ctl
209
+ 0.0 9,852,733 147 67,025.4 68,737.0 55,805 95,256 5,155.2 sleep
210
+ 0.0 7,858,705 22 357,213.9 474,706.5 8,796 678,261 278,233.7 pthread_rwlock_rdlock
211
+ 0.0 7,721,440 131 58,942.3 56,260.0 21,296 195,560 26,034.2 pthread_create
212
+ 0.0 7,224,126 929 7,776.2 3,096.0 1,000 86,864 11,142.9 fgets
213
+ 0.0 1,723,609 344 5,010.5 4,755.0 1,827 40,649 2,516.1 fopen64
214
+ 0.0 1,708,972 62 27,564.1 2,983.5 1,002 230,421 59,773.0 futex
215
+ 0.0 1,347,355 1,069 1,260.4 1,023.0 1,000 12,904 880.3 fclose
216
+ 0.0 1,149,466 196 5,864.6 3,579.5 1,105 168,420 13,582.2 mmap
217
+ 0.0 878,967 1 878,967.0 878,967.0 878,967 878,967 0.0 fork
218
+ 0.0 364,215 65 5,603.3 5,028.0 1,909 15,104 3,123.9 pipe2
219
+ 0.0 247,833 41 6,044.7 4,941.0 1,709 17,457 4,172.6 socket
220
+ 0.0 188,362 19 9,913.8 3,097.0 1,045 62,742 16,639.4 bind
221
+ 0.0 128,433 34 3,777.4 3,261.0 1,187 14,840 2,461.4 pthread_cond_broadcast
222
+ 0.0 76,747 7 10,963.9 9,959.0 3,576 31,262 9,493.1 fread
223
+ 0.0 65,399 41 1,595.1 1,200.0 1,012 5,988 1,063.6 fcntl
224
+ 0.0 49,079 5 9,815.8 9,542.0 4,750 17,158 4,761.0 accept4
225
+ 0.0 42,725 25 1,709.0 1,806.0 1,011 2,296 397.2 sigaction
226
+ 0.0 40,441 20 2,022.1 2,166.5 1,063 3,618 818.8 dup2
227
+ 0.0 39,878 15 2,658.5 2,065.0 1,267 7,040 1,459.6 stat
228
+ 0.0 31,245 12 2,603.8 1,918.0 1,006 5,220 1,771.4 fflush
229
+ 0.0 27,179 5 5,435.8 5,277.0 1,662 9,374 3,035.3 fwrite
230
+ 0.0 21,540 4 5,385.0 5,545.5 4,572 5,877 575.6 lstat
231
+ 0.0 17,255 4 4,313.8 4,516.5 2,856 5,366 1,051.1 flock
232
+ 0.0 16,827 9 1,869.7 1,599.0 1,008 3,313 844.3 pread
233
+ 0.0 15,569 10 1,556.9 1,444.0 1,184 2,260 325.7 listen
234
+ 0.0 13,074 3 4,358.0 4,294.0 4,285 4,495 118.7 fputs_unlocked
235
+ 0.0 12,439 5 2,487.8 2,713.0 1,831 3,023 566.6 mprotect
236
+ 0.0 7,489 4 1,872.3 1,856.5 1,636 2,140 206.8 flockfile
237
+ 0.0 6,919 1 6,919.0 6,919.0 6,919 6,919 0.0 kill
238
+ 0.0 5,460 2 2,730.0 2,730.0 2,008 3,452 1,021.1 openat64
239
+ 0.0 5,297 3 1,765.7 1,842.0 1,157 2,298 574.3 fstat
240
+ 0.0 3,627 1 3,627.0 3,627.0 3,627 3,627 0.0 fputs
241
+
242
+ [5/8] Executing 'cuda_api_sum' stats report
243
+
244
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
245
+ -------- --------------- --------- ----------- ----------- -------- ----------- ----------- ------------------------------------------
246
+ 65.2 20,318,348,211 12,196 1,665,984.6 3,806.0 1,713 143,905,820 4,877,424.6 cudaStreamSynchronize
247
+ 19.6 6,114,479,110 979,090 6,245.1 4,893.0 826 61,317,707 103,504.7 cudaLaunchKernel
248
+ 5.9 1,844,232,020 151,960 12,136.3 9,991.0 7,437 6,397,280 55,443.5 cudaGraphLaunch_v10000
249
+ 4.4 1,366,270,456 61,593 22,182.2 8,605.0 2,898 97,438,741 427,651.1 cudaMemcpyAsync
250
+ 2.1 657,913,070 123,014 5,348.3 4,791.0 646 11,218,430 74,172.4 cuLaunchKernel
251
+ 0.7 225,191,175 1,943 115,898.7 75,223.0 40,921 1,507,774 191,247.4 cudaGraphInstantiateWithFlags_v11040
252
+ 0.6 190,028,321 2,135 89,006.2 32,930.0 5,778 121,430,749 2,627,383.4 cudaDeviceSynchronize
253
+ 0.4 131,733,072 24,728 5,327.3 5,346.0 183 7,263,692 48,804.8 cudaMemsetAsync
254
+ 0.4 117,166,497 154,261 759.5 737.0 297 9,955 164.6 cudaStreamIsCapturing_v10000
255
+ 0.2 54,817,946 222 246,927.7 125,544.5 64,964 2,389,846 359,442.8 cudaFree
256
+ 0.1 41,574,143 348 119,465.9 111,957.5 6,496 1,314,648 70,124.0 cudaMalloc
257
+ 0.1 25,470,407 10 2,547,040.7 2,568,202.0 60,182 4,674,895 1,473,736.1 cuLibraryLoadData
258
+ 0.0 14,126,639 13,502 1,046.3 512.0 267 4,070,645 36,594.4 cuKernelGetFunction
259
+ 0.0 11,511,739 169 68,116.8 73,800.0 26,538 398,968 40,288.3 cuModuleLoadData
260
+ 0.0 9,477,345 18,895 501.6 477.0 305 7,151 120.4 cudaStreamGetCaptureInfo_v2_v11030
261
+ 0.0 8,547,159 1,943 4,398.9 4,349.0 3,274 12,306 644.1 cudaStreamBeginCapture_v10000
262
+ 0.0 7,583,507 1,943 3,903.0 3,886.0 2,371 10,115 530.0 cudaGraphDestroy_v10000
263
+ 0.0 2,953,354 128 23,073.1 2,127.0 1,339 976,651 118,496.6 cudaStreamCreateWithPriority
264
+ 0.0 2,583,759 1,943 1,329.8 1,322.0 973 2,362 129.3 cudaStreamEndCapture_v10000
265
+ 0.0 1,910,887 26 73,495.7 12,773.5 3,625 1,207,162 232,915.8 cudaHostAlloc
266
+ 0.0 1,625,828 1,943 836.8 771.0 625 3,016 254.6 cudaGraphGetNodes_v10000
267
+ 0.0 943,862 310 3,044.7 2,639.0 879 11,991 1,944.4 cudaEventQuery
268
+ 0.0 731,374 311 2,351.7 2,439.0 991 7,657 1,133.3 cudaEventRecord
269
+ 0.0 219,541 8 27,442.6 26,305.5 8,804 64,233 18,995.2 cudaMemGetInfo
270
+ 0.0 140,500 810 173.5 143.0 85 1,704 110.6 cuGetProcAddress_v2
271
+ 0.0 21,914 21 1,043.5 438.0 339 4,729 1,202.9 cudaEventCreateWithFlags
272
+ 0.0 16,258 16 1,016.1 849.5 502 2,663 551.3 cuLibraryGetKernel
273
+ 0.0 8,991 14 642.2 586.0 346 1,420 261.0 cudaThreadExchangeStreamCaptureMode_v10010
274
+ 0.0 4,849 3 1,616.3 1,664.0 1,386 1,799 210.6 cuInit
275
+ 0.0 3,460 4 865.0 749.0 110 1,852 882.7 cuModuleGetLoadingMode
276
+ 0.0 3,416 1 3,416.0 3,416.0 3,416 3,416 0.0 cudaStreamWaitEvent
277
+ 0.0 1,901 1 1,901.0 1,901.0 1,901 1,901 0.0 cudaEventDestroy
278
+ 0.0 1,166 2 583.0 583.0 248 918 473.8 cudaGetDriverEntryPoint_v11030
279
+
280
+ [6/8] Executing 'cuda_gpu_kern_sum' stats report
281
+
282
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
283
+ -------- --------------- --------- ----------- ----------- --------- --------- ----------- ----------------------------------------------------------------------------------------------------
284
+ 33.5 9,507,682,829 84,588 112,399.9 58,880.0 5,728 569,477 137,306.6 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
285
+ 27.2 7,733,720,607 29,164 265,180.4 333,123.0 33,344 763,622 112,810.5 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
286
+ 7.4 2,089,759,636 1,164 1,795,326.1 1,390,859.0 40,065 4,518,698 1,024,759.1 ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
287
+ 3.3 942,739,585 5,754 163,840.7 13,376.0 1,951 1,008,234 293,922.8 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
288
+ 3.3 942,069,183 76,664 12,288.3 8,032.0 6,240 73,248 8,482.6 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
289
+ 3.3 926,915,977 5,958 155,575.0 73,120.5 7,649 549,540 194,687.6 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
290
+ 2.8 781,701,830 1,991 392,617.7 496,547.0 10,528 506,724 194,713.8 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
291
+ 2.5 718,309,736 5,756 124,793.2 9,920.0 5,151 716,420 213,447.3 void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
292
+ 2.4 679,170,209 292,768 2,319.8 1,889.0 1,631 6,304 962.1 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
293
+ 2.1 605,586,081 14,252 42,491.3 42,529.0 26,240 62,817 1,642.0 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
294
+ 1.8 516,743,427 13,776 37,510.4 37,472.0 36,608 42,560 332.8 ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
295
+ 1.3 366,625,152 16,268 22,536.6 23,936.0 1,055 462,659 19,521.4 triton_poi_fused_mul_silu_1
296
+ 1.2 345,171,402 112 3,081,887.5 3,078,316.5 3,036,028 3,128,477 29,283.9 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
297
+ 0.9 260,532,523 513 507,860.7 507,843.0 506,403 509,475 427.2 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x1_tn_align8>(T1::Par…
298
+ 0.9 255,622,286 604 423,215.7 487,970.0 7,008 488,866 160,519.3 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
299
+ 0.9 242,493,430 161,056 1,505.6 1,280.0 1,023 3,488 458.1 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
300
+ 0.7 206,425,050 146,384 1,410.2 1,344.0 1,183 2,208 221.6 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
301
+ 0.7 203,499,133 184 1,105,973.5 579,541.0 369,795 2,808,909 981,858.0 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
302
+ 0.7 187,786,954 1,120 167,666.9 158,929.0 40,416 1,415,463 206,084.0 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
303
+ 0.6 180,432,004 16,268 11,091.2 11,936.0 1,505 111,617 4,905.8 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
304
+ 0.5 135,678,243 43,792 3,098.2 3,104.0 2,943 3,616 73.3 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
305
+ 0.3 98,055,325 32,872 2,982.9 2,945.0 2,847 3,233 98.3 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
306
+ 0.3 83,656,872 16,268 5,142.4 5,472.0 1,536 79,136 3,233.8 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
307
+ 0.2 52,052,585 15,687 3,318.2 3,457.0 1,344 22,048 905.0 triton_poi_fused_cat_3
308
+ 0.1 39,252,777 8 4,906,597.1 4,863,769.0 4,802,745 5,085,370 117,043.6 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
309
+ 0.1 35,101,287 15,687 2,237.6 2,336.0 863 16,672 687.2 triton_poi_fused_view_5
310
+ 0.1 26,987,031 17,256 1,563.9 1,408.0 1,023 2,784 465.1 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
311
+ 0.1 22,634,043 784 28,870.0 12,543.5 11,616 62,720 20,595.1 ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
312
+ 0.1 22,584,985 15,687 1,439.7 1,440.0 1,215 6,720 194.4 triton_poi_fused_cat_4
313
+ 0.1 20,606,081 5,888 3,499.7 3,136.0 2,687 7,488 970.6 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
314
+ 0.1 20,451,721 4 5,112,930.3 5,112,153.5 4,937,754 5,289,660 201,308.2 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
315
+ 0.1 15,066,944 5,752 2,619.4 2,368.0 1,952 3,969 554.4 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
316
+ 0.1 14,293,381 28 510,477.9 512,002.5 468,706 513,474 8,208.5 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
317
+ 0.0 9,733,874 4 2,433,468.5 2,435,244.5 2,367,692 2,495,693 60,628.3 void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
318
+ 0.0 9,136,049 28 326,287.5 326,210.0 324,514 329,538 982.3 ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
319
+ 0.0 8,425,678 224 37,614.6 37,568.5 36,608 38,880 409.5 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
320
+ 0.0 7,777,256 2 3,888,628.0 3,888,628.0 3,705,715 4,071,541 258,678.0 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
321
+ 0.0 7,758,429 8,970 864.9 864.0 767 1,280 77.7 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
322
+ 0.0 7,519,688 5,754 1,306.9 1,152.0 1,023 2,048 279.6 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
323
+ 0.0 7,231,396 336 21,522.0 21,440.0 21,056 22,592 362.7 ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
324
+ 0.0 6,130,365 476 12,878.9 12,800.5 11,744 14,432 645.4 ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
325
+ 0.0 5,896,061 4 1,474,015.3 1,473,463.0 1,473,191 1,475,944 1,292.3 void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
326
+ 0.0 5,380,367 5,752 935.4 896.0 863 1,344 76.3 void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
327
+ 0.0 4,603,692 5,292 869.9 864.0 767 1,185 33.2 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
328
+ 0.0 3,996,949 2 1,998,474.5 1,998,474.5 1,996,490 2,000,459 2,806.5 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
329
+ 0.0 3,842,156 4,143 927.4 896.0 800 1,856 93.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
330
+ 0.0 3,593,747 56 64,174.1 64,144.5 63,105 65,728 478.5 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
331
+ 0.0 3,433,971 4 858,492.8 858,901.0 855,877 860,292 2,168.9 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
332
+ 0.0 3,191,215 2 1,595,607.5 1,595,607.5 1,560,359 1,630,856 49,848.9 void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
333
+ 0.0 2,871,002 1,512 1,898.8 1,760.0 1,312 2,912 445.3 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
334
+ 0.0 2,643,071 581 4,549.2 4,671.0 1,984 36,256 1,427.2 triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
335
+ 0.0 2,581,742 2 1,290,871.0 1,290,871.0 1,290,663 1,291,079 294.2 at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
336
+ 0.0 2,581,389 2 1,290,694.5 1,290,694.5 1,290,406 1,290,983 408.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
337
+ 0.0 2,421,998 112 21,625.0 21,552.0 9,408 34,465 12,020.4 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
338
+ 0.0 1,835,794 581 3,159.7 3,200.0 1,632 39,200 1,543.9 triton_poi_fused_cat_1
339
+ 0.0 1,365,128 2 682,564.0 682,564.0 677,764 687,364 6,788.2 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
340
+ 0.0 1,304,994 581 2,246.1 2,368.0 863 14,272 629.4 triton_poi_fused_view_3
341
+ 0.0 1,202,982 56 21,481.8 21,456.0 21,152 21,888 275.5 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
342
+ 0.0 1,027,854 1,153 891.5 896.0 800 1,216 36.7 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
343
+ 0.0 956,098 28 34,146.4 34,736.5 17,920 35,200 3,188.0 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
344
+ 0.0 847,695 581 1,459.0 1,440.0 1,216 9,408 336.4 triton_poi_fused_cat_2
345
+ 0.0 611,794 673 909.1 896.0 864 1,025 28.1 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<long>, std::array<char *, (uns…
346
+ 0.0 417,574 308 1,355.8 1,344.0 1,311 1,504 21.9 void vllm::merge_attn_states_kernel<__nv_bfloat16, (unsigned int)128>(T1 *, float *, const T1 *, co…
347
+ 0.0 295,335 168 1,757.9 1,760.0 1,535 2,080 119.1 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
348
+ 0.0 155,841 1 155,841.0 155,841.0 155,841 155,841 0.0 void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
349
+ 0.0 78,880 1 78,880.0 78,880.0 78,880 78,880 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
350
+ 0.0 63,740 58 1,099.0 896.0 864 11,360 1,372.6 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
351
+ 0.0 43,936 1 43,936.0 43,936.0 43,936 43,936 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
352
+ 0.0 36,570 28 1,306.1 1,312.0 1,280 1,376 19.5 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
353
+ 0.0 26,816 1 26,816.0 26,816.0 26,816 26,816 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
354
+ 0.0 19,520 1 19,520.0 19,520.0 19,520 19,520 0.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
355
+ 0.0 11,936 11 1,085.1 864.0 864 1,568 286.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
356
+ 0.0 10,752 2 5,376.0 5,376.0 5,120 5,632 362.0 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
357
+ 0.0 9,152 2 4,576.0 4,576.0 4,480 4,672 135.8 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
358
+ 0.0 3,616 2 1,808.0 1,808.0 1,600 2,016 294.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
359
+ 0.0 3,424 2 1,712.0 1,712.0 1,664 1,760 67.9 void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
360
+ 0.0 3,136 2 1,568.0 1,568.0 1,504 1,632 90.5 void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
361
+ 0.0 3,104 2 1,552.0 1,552.0 1,344 1,760 294.2 void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
362
+ 0.0 2,975 2 1,487.5 1,487.5 992 1,983 700.7 void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
363
+ 0.0 2,912 2 1,456.0 1,456.0 1,344 1,568 158.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
364
+ 0.0 2,336 1 2,336.0 2,336.0 2,336 2,336 0.0 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
365
+ 0.0 1,184 1 1,184.0 1,184.0 1,184 1,184 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
366
+ 0.0 1,024 1 1,024.0 1,024.0 1,024 1,024 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
367
+ 0.0 1,024 1 1,024.0 1,024.0 1,024 1,024 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
368
+ 0.0 896 1 896.0 896.0 896 896 0.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
369
+
370
+ [7/8] Executing 'cuda_gpu_mem_time_sum' stats report
371
+
372
+ Time (%) Total Time (ns) Count Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Operation
373
+ -------- --------------- ------ -------- -------- -------- ---------- ----------- ------------------------------
374
+ 93.2 540,571,743 41,277 13,096.2 352.0 287 97,068,545 513,408.1 [CUDA memcpy Host-to-Device]
375
+ 3.2 18,710,334 14,564 1,284.7 896.0 864 1,362,855 22,521.7 [CUDA memcpy Device-to-Device]
376
+ 2.5 14,536,294 21,760 668.0 768.0 287 7,744 311.5 [CUDA memset]
377
+ 1.1 6,503,130 5,752 1,130.6 1,120.0 863 1,760 95.6 [CUDA memcpy Device-to-Host]
378
+
379
+ [8/8] Executing 'cuda_gpu_mem_size_sum' stats report
380
+
381
+ Total (MB) Count Avg (MB) Med (MB) Min (MB) Max (MB) StdDev (MB) Operation
382
+ ---------- ------ -------- -------- -------- -------- ----------- ------------------------------
383
+ 4,190.741 41,277 0.102 0.000 0.000 466.747 2.619 [CUDA memcpy Host-to-Device]
384
+ 2,534.048 14,564 0.174 0.003 0.003 622.330 10.312 [CUDA memcpy Device-to-Device]
385
+ 14.589 21,760 0.001 0.001 0.000 0.006 0.000 [CUDA memset]
386
+ 4.192 5,752 0.001 0.000 0.000 0.004 0.001 [CUDA memcpy Device-to-Host]
387
+
388
+ Generated:
389
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding.nsys-rep
390
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding.sqlite
sim_traverse_bs/traverse_bs_util_sim_decoding.nsys-rep ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9cb1e2cf07fcb0e9c21d44c66cdf7762bc995625a1a55e835b8cffc5a6104b8
3
+ size 109416019
sim_traverse_bs/traverse_bs_util_sim_decoding.py ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import statistics
4
+ from typing import List, Tuple, Dict
5
+
6
+ import torch
7
+ import torch.cuda.nvtx as nvtx
8
+
9
+ from vllm import LLM, SamplingParams
10
+ from transformers import AutoTokenizer
11
+
12
# ---- Force the vLLM V1 engine ----
os.environ.setdefault("VLLM_USE_V1", "1")
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

# Optional: enable V1 metrics/statistics logging.
os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")

# ---- Try importing V1 metrics types (layout differs across vLLM versions) ----
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
except Exception:
    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy fallback

# ---- Benchmark configuration ----
MODEL_NAME = "Qwen/Qwen2-1.5B"
DTYPE = "bfloat16"
TP = 1
GPU_MEM_UTIL = 0.90
TRUST_REMOTE_CODE = True

# Scenarios: prefill = input tokens, decode = output tokens.
# NOTE(review): the active scenario is labelled "decode512" but generates
# 1024 tokens (max_new_tokens=1024) — the label looks stale; confirm intent.
SCENARIOS = [
    # {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
    {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 1024},
    # {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
]

BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]

SEED = 1234
TEMPERATURE = 0.0
TOP_P = 1.0
WARMUP_PER_BS = 1  # one warm-up pass per batch size
45
+
46
# ---- Build a prompt with an exact token count ----
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Return a prompt string that tokenizes to exactly ``target_len`` tokens.

    Binary-searches over character-prefix lengths of a long filler text;
    if no prefix hits the target exactly, falls back to truncating or
    padding the token sequence.  Asserts the final round-trip length.
    """
    if target_len <= 1:
        # Minimal prompt: one simple token (avoids a zero-token empty string).
        ids = tokenizer("A", add_special_tokens=False)["input_ids"]
        if len(ids) >= 1:
            return tokenizer.decode(ids[:1], skip_special_tokens=True, clean_up_tokenization_spaces=False)

    preamble = (
        "You are a helpful assistant. "
        "Please analyze the following input and respond succinctly. "
    )
    filler_sentence = " ".join(["data"] * 100) + ". "
    text = preamble + filler_sentence * 200  # plenty of source text

    # Binary search over the character prefix whose token count matches.
    low, high = 0, len(text)
    exact_ids = None
    while low <= high:
        middle = (low + high) // 2
        candidate = tokenizer(text[:middle], add_special_tokens=False)["input_ids"]
        if len(candidate) == target_len:
            exact_ids = candidate
            break
        if len(candidate) < target_len:
            low = middle + 1
        else:
            high = middle - 1

    if exact_ids is None:
        # No exact prefix: truncate if too long, otherwise keep appending filler.
        candidate = tokenizer(text[:low], add_special_tokens=False)["input_ids"]
        if len(candidate) > target_len:
            exact_ids = candidate[:target_len]
        else:
            pad = " data"
            while len(candidate) < target_len:
                candidate = tokenizer(tokenizer.decode(candidate) + pad, add_special_tokens=False)["input_ids"]
            exact_ids = candidate[:target_len]

    prompt = tokenizer.decode(exact_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    # Sanity check: the decoded prompt must round-trip to the exact length.
    assert len(tokenizer(prompt, add_special_tokens=False)["input_ids"]) == target_len
    return prompt
88
+
89
# ---- V1 metrics extraction helpers ----
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"    # time-to-first-token histogram
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency histogram
92
+
93
+ def _iter_children_of_vector(vec_obj):
94
+ for attr in ("children", "metrics", "series", "values", "samples", "items"):
95
+ if hasattr(vec_obj, attr):
96
+ val = getattr(vec_obj, attr)
97
+ if isinstance(val, dict):
98
+ for v in val.values():
99
+ yield v
100
+ else:
101
+ try:
102
+ for v in val:
103
+ yield v
104
+ except TypeError:
105
+ pass
106
+
107
def _collect_hist_sum_count(metrics, metric_name: str):
    """Aggregate (sum, count) across all histogram samples named ``metric_name``.

    Handles both plain ``Histogram`` metrics and ``Vector`` containers of
    histograms.  The class-name string comparison is a fallback for vLLM
    versions where the imported types do not match the runtime objects.
    """
    agg_sum = 0.0
    agg_count = 0.0

    def _is_histogram(obj):
        # Match by type first, then by class name (version tolerant).
        return isinstance(obj, Histogram) or obj.__class__.__name__ == "Histogram"

    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue
        # Direct Histogram metric.
        if _is_histogram(metric):
            agg_sum += float(getattr(metric, "sum", 0.0))
            agg_count += float(getattr(metric, "count", 0.0))
            continue
        # Vector[Histogram]: accumulate over every child.
        if isinstance(metric, Vector) or metric.__class__.__name__ == "Vector":
            for child in _iter_children_of_vector(metric):
                if _is_histogram(child):
                    agg_sum += float(getattr(child, "sum", 0.0))
                    agg_count += float(getattr(child, "count", 0.0))
    return agg_sum, agg_count
126
+
127
+ def _metrics_snapshot(llm) -> Dict[str, float]:
128
+ try:
129
+ mets = llm.get_metrics() # V1: 返回 Metric 列表(包含 Histogram/Vector 等)
130
+ except Exception:
131
+ return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
132
+ ttft_sum, ttft_cnt = _collect_hist_sum_count(mets, TTFT_METRIC_NAME)
133
+ tpot_sum, tpot_cnt = _collect_hist_sum_count(mets, TPOT_METRIC_NAME)
134
+ return {"ttft_sum": ttft_sum, "ttft_cnt": ttft_cnt, "tpot_sum": tpot_sum, "tpot_cnt": tpot_cnt}
135
+
136
+ def _metrics_delta(before: dict, after: dict):
137
+ return {
138
+ "ttft_sum": after["ttft_sum"] - before["ttft_sum"],
139
+ "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
140
+ "tpot_sum": after["tpot_sum"] - before["tpot_sum"],
141
+ "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
142
+ }
143
+
144
# ---- generate() wrapper (hook point for NVTX instrumentation) ----
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
    """Forward to ``llm.generate``; exists so profiling hooks can wrap one call site."""
    result = llm.generate(prompts, params)
    return result
147
+
148
+ # ========= 统计格式化 =========
149
+ def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
150
+ xs = [v for v in x if (v == v)] # 过滤 NaN
151
+ if not xs:
152
+ return (float("nan"), float("nan"), float("nan"))
153
+ return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1]) # p90
154
+
155
def main():
    """Sweep batch sizes for each scenario and report latency/throughput.

    For every (scenario, batch size) pair this runs one generate() call,
    wraps it in an NVTX range so Nsight Systems traces can be correlated,
    and derives TTFT / per-token decode latency from V1 metric snapshot
    deltas taken around the call.
    """
    print("--- vLLM V1 基准测试(含 NVTX 标记)---")
    print(f"模型: {MODEL_NAME}")
    print(f"批量大小: {BATCH_SIZES}")
    print(f"场景: {[s['name'] for s in SCENARIOS]}")
    print("-" * 60)

    if not torch.cuda.is_available():
        print("错误:需要 CUDA GPU。")
        return

    print("加载分词器/模型中...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)

    # NVTX range around engine construction so model load shows up in traces.
    nvtx.range_push("LLM_init")
    llm = LLM(
        model=MODEL_NAME,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=TRUST_REMOTE_CODE,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=1024,        # large enough to cover the whole sweep
        max_model_len=4096,
        disable_log_stats=False,  # keep V1 metrics collection enabled
    )
    nvtx.range_pop()
    print("模型加载完成。")

    for scenario in SCENARIOS:
        name = scenario["name"]
        prompt_tokens = scenario["prompt_tokens"]
        max_new_tokens = scenario["max_new_tokens"]

        print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")

        # Prompt with an exact token count for a controlled prefill size.
        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)

        # Force a fixed-length decode: ignore EOS and all stop criteria so
        # every request produces exactly max_new_tokens output tokens.
        sampling_params = SamplingParams(
            max_tokens=max_new_tokens,
            ignore_eos=True,
            stop=None,
            stop_token_ids=[],
            min_tokens=max_new_tokens,  # generate at least N tokens (<= max_tokens)
            temperature=0.0,
            top_p=1.0,
        )

        for bs in BATCH_SIZES:
            print(f"\n--- 批量大小 bs={bs} ---")

            prompts = [prompt_text] * bs

            # Timed region plus V1 metric snapshots around the generate call.
            torch.cuda.synchronize()
            snap_before = _metrics_snapshot(llm)
            start = time.perf_counter()

            nvtx.range_push(f"generate [{name}] bs={bs}")
            outputs = decorated_generate(llm, prompts, sampling_params)
            nvtx.range_pop()  # generate

            torch.cuda.synchronize()
            end = time.perf_counter()
            snap_after = _metrics_snapshot(llm)

            duration = end - start

            # Token accounting and end-to-end throughput.
            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
            throughput = total_output_tokens / duration if duration > 0 else float("inf")

            # TTFT / decode throughput from the V1 metric deltas.
            delta = _metrics_delta(snap_before, snap_after)
            if delta["ttft_cnt"] > 0:
                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            else:
                ttft = float("nan")

            if delta["tpot_cnt"] > 0:
                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds per output token
                decode_tps = 1.0 / avg_tpot
            else:
                decode_tps = float("nan")

            print(f"执行时间: {duration:.4f} s")
            print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
            print(f"生成总 tokens: {total_output_tokens}")
            print(f"吞吐(生成tokens/秒): {throughput:.2f}")
            print(f"TTFT (V1 metrics): {ttft:.4f} s")
            print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")

    print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
271
+
272
if __name__ == "__main__":
    # Surface the GPU pinning before the run for easier log correlation.
    visible = os.getenv("CUDA_VISIBLE_DEVICES")
    print(f"CUDA_VISIBLE_DEVICES = {visible}")
    main()
sim_traverse_bs/traverse_bs_util_sim_decoding_1024.log ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ WARNING: CPU IP/backtrace sampling not supported, disabling.
2
+ Try the 'nsys status --environment' command to learn more.
3
+
4
+ WARNING: CPU context switch tracing not supported, disabling.
5
+ Try the 'nsys status --environment' command to learn more.
6
+
7
+ INFO 08-13 21:32:20 [__init__.py:235] Automatically detected platform cuda.
8
+ CUDA_VISIBLE_DEVICES = 3
9
+ --- vLLM V1 基准测试(含 NVTX 标记)---
10
+ 模型: Qwen/Qwen2-1.5B
11
+ 批量大小: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
12
+ 场景: ['prefill1_decode512']
13
+ ------------------------------------------------------------
14
+ 加载分词器/模型中...
15
+ INFO 08-13 21:32:29 [config.py:1604] Using max model len 4096
16
+ INFO 08-13 21:32:30 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
17
+ INFO 08-13 21:32:35 [__init__.py:235] Automatically detected platform cuda.
18
+ INFO 08-13 21:32:36 [core.py:572] Waiting for init message from front-end.
19
+ INFO 08-13 21:32:36 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
20
+ INFO 08-13 21:32:38 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
21
+ WARNING 08-13 21:32:38 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
22
+ INFO 08-13 21:32:38 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
23
+ INFO 08-13 21:32:38 [gpu_model_runner.py:1875] Loading model from scratch...
24
+ INFO 08-13 21:32:38 [cuda.py:290] Using Flash Attention backend on V1 engine.
25
+ INFO 08-13 21:32:39 [weight_utils.py:296] Using model weights format ['*.safetensors']
26
+ INFO 08-13 21:32:39 [weight_utils.py:349] No model.safetensors.index.json found in remote.
27
+
28
+
29
+
30
+
31
+ INFO 08-13 21:32:40 [default_loader.py:262] Loading weights took 0.63 seconds
32
+ INFO 08-13 21:32:40 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 1.831000 seconds
33
+ INFO 08-13 21:32:46 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
34
+ INFO 08-13 21:32:46 [backends.py:541] Dynamo bytecode transform time: 5.56 s
35
+ INFO 08-13 21:32:51 [backends.py:161] Directly load the compiled graph(s) for dynamic shape from the cache, took 4.562 s
36
+ INFO 08-13 21:32:52 [monitor.py:34] torch.compile takes 5.56 s in total
37
+ INFO 08-13 21:32:53 [gpu_worker.py:255] Available KV cache memory: 12.81 GiB
38
+ INFO 08-13 21:32:53 [kv_cache_utils.py:833] GPU KV cache size: 479,536 tokens
39
+ INFO 08-13 21:32:53 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.07x
40
+
41
+ INFO 08-13 21:32:55 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
42
+ INFO 08-13 21:32:55 [core.py:193] init engine (profile, create kv cache, warmup model) took 14.97 seconds
43
+ 模型加载完成。
44
+
45
+ ===== 场景:prefill1_decode512 | prefill=1, decode=1024 =====
46
+
47
+ --- 批量大小 bs=1 ---
48
+
49
+
50
+ 执行时间: 6.4234 s
51
+ 实际平均输入 tokens: 1.00(目标 1)
52
+ 生成总 tokens: 1024
53
+ 吞吐(生成tokens/秒): 159.42
54
+ TTFT (V1 metrics): 0.0138 s
55
+ 解码吞吐 (V1 metrics): 159.67 tok/s
56
+
57
+ --- 批量大小 bs=2 ---
58
+
59
+
60
+ 执行时间: 7.2050 s
61
+ 实际平均输入 tokens: 1.00(目标 1)
62
+ 生成总 tokens: 2048
63
+ 吞吐(生成tokens/秒): 284.25
64
+ TTFT (V1 metrics): 0.0127 s
65
+ 解码吞吐 (V1 metrics): 142.32 tok/s
66
+
67
+ --- 批量大小 bs=4 ---
68
+
69
+
70
+ 执行时间: 7.2755 s
71
+ 实际平均输入 tokens: 1.00(目标 1)
72
+ 生成总 tokens: 4096
73
+ 吞吐(生成tokens/秒): 562.99
74
+ TTFT (V1 metrics): 0.0141 s
75
+ 解码吞吐 (V1 metrics): 140.94 tok/s
76
+
77
+ --- 批量大小 bs=8 ---
78
+
79
+
80
+ 执行时间: 7.5025 s
81
+ 实际平均输入 tokens: 1.00(目标 1)
82
+ 生成总 tokens: 8192
83
+ 吞吐(生成tokens/秒): 1091.90
84
+ TTFT (V1 metrics): 0.0187 s
85
+ 解码吞吐 (V1 metrics): 136.74 tok/s
86
+
87
+ --- 批量大小 bs=16 ---
88
+
89
+
90
+ 执行时间: 8.0210 s
91
+ 实际平均输入 tokens: 1.00(目标 1)
92
+ 生成总 tokens: 16384
93
+ 吞吐(生成tokens/秒): 2042.63
94
+ TTFT (V1 metrics): 0.0178 s
95
+ 解码吞吐 (V1 metrics): 127.93 tok/s
96
+
97
+ --- 批量大小 bs=32 ---
98
+
99
+
100
+ 执行时间: 8.0376 s
101
+ 实际平均输入 tokens: 1.00(目标 1)
102
+ 生成总 tokens: 32768
103
+ 吞吐(生成tokens/秒): 4076.81
104
+ TTFT (V1 metrics): 0.0169 s
105
+ 解码吞吐 (V1 metrics): 127.81 tok/s
106
+
107
+ --- 批量大小 bs=64 ---
108
+
109
+
110
+ 执行时间: 8.8357 s
111
+ 实际平均输入 tokens: 1.00(目标 1)
112
+ 生成总 tokens: 65536
113
+ 吞吐(生成tokens/秒): 7417.22
114
+ TTFT (V1 metrics): 0.0206 s
115
+ 解码吞吐 (V1 metrics): 116.44 tok/s
116
+
117
+ --- 批量大小 bs=128 ---
118
+
119
+
120
+ 执行时间: 11.1653 s
121
+ 实际平均输入 tokens: 1.00(目标 1)
122
+ 生成总 tokens: 131072
123
+ 吞吐(生成tokens/秒): 11739.23
124
+ TTFT (V1 metrics): 0.0323 s
125
+ 解码吞吐 (V1 metrics): 92.27 tok/s
126
+
127
+ --- 批量大小 bs=256 ---
128
+
129
+
130
+ 执行时间: 16.5525 s
131
+ 实际平均输入 tokens: 1.00(目标 1)
132
+ 生成总 tokens: 262144
133
+ 吞吐(生成tokens/秒): 15837.09
134
+ TTFT (V1 metrics): 0.0482 s
135
+ 解码吞吐 (V1 metrics): 62.35 tok/s
136
+
137
+ --- 批量大小 bs=512 ---
138
+
139
+
140
+ 执行时间: 30.5987 s
141
+ 实际平均输入 tokens: 1.00(目标 1)
142
+ 生成总 tokens: 524288
143
+ 吞吐(生成tokens/秒): 17134.34
144
+ TTFT (V1 metrics): 0.1151 s
145
+ 解码吞吐 (V1 metrics): 34.41 tok/s
146
+
147
+ --- 批量大小 bs=1024 ---
148
+
149
+
150
+ [rank0]:[W813 21:35:54.931843453 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
151
+ 执行时间: 65.0204 s
152
+ 实际平均输入 tokens: 1.00(目标 1)
153
+ 生成总 tokens: 1048576
154
+ 吞吐(生成tokens/秒): 16126.88
155
+ TTFT (V1 metrics): 0.1185 s
156
+ 解码吞吐 (V1 metrics): 17.89 tok/s
157
+
158
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
159
+ GPU 3: General Metrics for NVIDIA AD10x (any frequency)
160
+ Generating '/tmp/nsys-report-5e6c.qdstrm'
161
+
162
+
163
+ [3/8] Executing 'nvtx_sum' stats report
164
+
165
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Style Range
166
+ -------- --------------- --------- ---------------- ---------------- -------------- -------------- ----------- ------- --------------------------------------
167
+ 30.8 65,019,709,544 1 65,019,709,544.0 65,019,709,544.0 65,019,709,544 65,019,709,544 0.0 PushPop :generate [prefill1_decode512] bs=1024
168
+ 16.4 34,528,225,294 1 34,528,225,294.0 34,528,225,294.0 34,528,225,294 34,528,225,294 0.0 PushPop :LLM_init
169
+ 14.5 30,598,009,745 1 30,598,009,745.0 30,598,009,745.0 30,598,009,745 30,598,009,745 0.0 PushPop :generate [prefill1_decode512] bs=512
170
+ 7.8 16,552,100,314 1 16,552,100,314.0 16,552,100,314.0 16,552,100,314 16,552,100,314 0.0 PushPop :generate [prefill1_decode512] bs=256
171
+ 5.3 11,165,129,518 1 11,165,129,518.0 11,165,129,518.0 11,165,129,518 11,165,129,518 0.0 PushPop :generate [prefill1_decode512] bs=128
172
+ 4.2 8,835,508,288 1 8,835,508,288.0 8,835,508,288.0 8,835,508,288 8,835,508,288 0.0 PushPop :generate [prefill1_decode512] bs=64
173
+ 3.8 8,037,503,827 1 8,037,503,827.0 8,037,503,827.0 8,037,503,827 8,037,503,827 0.0 PushPop :generate [prefill1_decode512] bs=32
174
+ 3.8 8,020,861,613 1 8,020,861,613.0 8,020,861,613.0 8,020,861,613 8,020,861,613 0.0 PushPop :generate [prefill1_decode512] bs=16
175
+ 3.6 7,502,439,278 1 7,502,439,278.0 7,502,439,278.0 7,502,439,278 7,502,439,278 0.0 PushPop :generate [prefill1_decode512] bs=8
176
+ 3.4 7,275,415,153 1 7,275,415,153.0 7,275,415,153.0 7,275,415,153 7,275,415,153 0.0 PushPop :generate [prefill1_decode512] bs=4
177
+ 3.4 7,204,870,191 1 7,204,870,191.0 7,204,870,191.0 7,204,870,191 7,204,870,191 0.0 PushPop :generate [prefill1_decode512] bs=2
178
+ 3.0 6,423,331,403 1 6,423,331,403.0 6,423,331,403.0 6,423,331,403 6,423,331,403 0.0 PushPop :generate [prefill1_decode512] bs=1
179
+ 0.0 97,220 2 48,610.0 48,610.0 47,061 50,159 2,190.6 PushPop CCCL:cub::DeviceSegmentedRadixSort
180
+
181
+ [4/8] Executing 'osrt_sum' stats report
182
+
183
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
184
+ -------- ----------------- --------- --------------- ---------------- --------- --------------- ---------------- ----------------------
185
+ 29.4 2,592,016,067,300 89,110 29,087,824.8 37,176.5 1,000 196,381,349,289 1,472,090,154.0 pthread_cond_timedwait
186
+ 24.1 2,131,420,272,351 832 2,561,803,212.0 17,028.0 6,962 196,382,583,332 22,164,783,605.9 pthread_cond_wait
187
+ 23.8 2,103,264,859,788 139,802 15,044,597.8 10,063,165.0 1,000 182,210,183,998 708,819,423.9 epoll_wait
188
+ 8.6 757,597,580,457 97 7,810,284,334.6 10,000,087,607.0 9,123 10,000,141,025 4,049,277,230.6 sem_timedwait
189
+ 8.1 715,974,502,372 73,851 9,694,851.8 1,424.0 1,000 30,237,953,598 166,855,063.6 poll
190
+ 3.5 307,209,613,206 23,827 12,893,340.0 7,249,780.0 28,136 424,017,307 14,586,617.3 sem_wait
191
+ 2.4 212,178,624,327 52,115 4,071,354.2 2,015.0 1,000 194,509,488,551 853,307,229.4 read
192
+ 0.0 4,204,021,023 771 5,452,686.2 3,184,955.0 1,046 47,259,678 6,190,683.1 pthread_rwlock_wrlock
193
+ 0.0 556,982,469 205,066 2,716.1 1,395.0 1,015 72,058,054 159,155.6 munmap
194
+ 0.0 508,440,444 693 733,680.3 1,074.0 1,000 501,674,334 19,056,845.3 waitpid
195
+ 0.0 282,788,889 8,972 31,519.0 8,207.5 1,000 19,677,480 317,215.8 ioctl
196
+ 0.0 207,036,840 369 561,075.4 2,063.0 1,059 22,500,907 3,196,025.5 fopen
197
+ 0.0 137,675,005 16,197 8,500.0 5,777.0 1,292 74,868 7,416.4 send
198
+ 0.0 134,110,298 26,256 5,107.8 5,241.0 1,000 4,804,910 33,899.5 mmap64
199
+ 0.0 125,003,708 16,189 7,721.5 5,348.0 1,028 2,620,379 27,095.4 recv
200
+ 0.0 124,828,097 71,937 1,735.2 1,593.0 1,000 57,072 823.9 pthread_cond_signal
201
+ 0.0 121,590,749 24 5,066,281.2 5,064,224.5 5,053,813 5,077,752 6,272.7 nanosleep
202
+ 0.0 109,066,343 11,638 9,371.6 6,763.0 1,012 350,375 9,043.1 pthread_mutex_lock
203
+ 0.0 93,279,596 27,220 3,426.9 2,358.5 1,024 153,763 5,757.8 write
204
+ 0.0 88,935,356 31,463 2,826.7 2,452.0 1,000 113,592 1,855.8 open64
205
+ 0.0 69,874,922 94 743,350.2 3,294.5 1,031 18,719,114 3,530,346.8 open
206
+ 0.0 68,894,671 38 1,813,017.7 580,319.0 4,100 10,403,699 3,383,579.7 pthread_join
207
+ 0.0 58,572,963 10 5,857,296.3 19,236.5 8,226 58,388,275 18,457,510.0 connect
208
+ 0.0 32,577,628 39 835,323.8 1,010,499.0 19,103 1,444,190 573,956.4 pthread_rwlock_rdlock
209
+ 0.0 29,134,753 18,187 1,602.0 1,332.0 1,000 44,360 802.2 epoll_ctl
210
+ 0.0 13,985,406 147 95,138.8 68,564.0 54,005 4,263,847 346,238.1 sleep
211
+ 0.0 7,106,157 131 54,245.5 47,729.0 19,806 155,514 26,100.4 pthread_create
212
+ 0.0 5,907,972 868 6,806.4 4,068.0 1,008 97,687 8,868.4 fgets
213
+ 0.0 1,715,987 65 26,399.8 3,330.0 1,055 473,306 73,248.1 futex
214
+ 0.0 1,658,264 344 4,820.5 4,549.0 1,638 51,357 2,832.0 fopen64
215
+ 0.0 1,333,956 1,140 1,170.1 1,036.0 1,000 9,415 572.9 fclose
216
+ 0.0 1,043,640 1 1,043,640.0 1,043,640.0 1,043,640 1,043,640 0.0 fork
217
+ 0.0 913,975 195 4,687.1 3,377.0 1,101 52,405 6,163.2 mmap
218
+ 0.0 836,573 2 418,286.5 418,286.5 6,782 829,791 581,955.2 kill
219
+ 0.0 358,003 65 5,507.7 4,718.0 2,188 14,024 2,925.3 pipe2
220
+ 0.0 215,020 41 5,244.4 3,970.0 1,620 17,421 3,543.4 socket
221
+ 0.0 161,868 37 4,374.8 2,946.0 1,083 14,013 3,353.3 pthread_cond_broadcast
222
+ 0.0 121,455 17 7,144.4 2,996.0 1,029 34,938 10,194.7 bind
223
+ 0.0 77,221 7 11,031.6 7,581.0 4,133 36,236 11,339.1 fread
224
+ 0.0 39,450 15 2,630.0 1,819.0 1,186 7,083 1,552.8 stat
225
+ 0.0 37,451 23 1,628.3 1,663.0 1,048 2,012 323.2 sigaction
226
+ 0.0 35,497 5 7,099.4 8,915.0 3,047 10,850 3,562.4 accept4
227
+ 0.0 33,739 18 1,874.4 2,021.0 1,184 2,697 563.0 dup2
228
+ 0.0 24,284 12 2,023.7 1,304.0 1,000 5,336 1,446.7 fcntl
229
+ 0.0 23,323 5 4,664.6 3,577.0 2,140 8,317 2,528.9 fwrite
230
+ 0.0 22,194 7 3,170.6 3,746.0 1,157 5,067 1,793.0 fflush
231
+ 0.0 18,242 4 4,560.5 4,522.5 4,277 4,920 305.0 lstat
232
+ 0.0 16,543 4 4,135.8 3,653.0 2,777 6,460 1,657.0 flock
233
+ 0.0 15,834 8 1,979.3 1,650.0 1,263 3,278 825.8 pread
234
+ 0.0 12,843 5 2,568.6 2,703.0 1,847 3,016 448.7 mprotect
235
+ 0.0 12,668 3 4,222.7 4,276.0 4,068 4,324 136.1 fputs_unlocked
236
+ 0.0 9,955 7 1,422.1 1,315.0 1,127 2,334 418.4 listen
237
+ 0.0 6,128 3 2,042.7 2,107.0 1,885 2,136 137.3 flockfile
238
+ 0.0 4,323 3 1,441.0 1,234.0 1,233 1,856 359.4 fstat
239
+ 0.0 4,204 2 2,102.0 2,102.0 1,413 2,791 974.4 openat64
240
+ 0.0 3,018 1 3,018.0 3,018.0 3,018 3,018 0.0 fputs
241
+
242
+ [5/8] Executing 'cuda_api_sum' stats report
243
+
244
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
245
+ -------- --------------- --------- ----------- ----------- -------- ----------- ----------- ------------------------------------------
246
+ 45.1 12,813,768,740 19,112 670,456.7 3,838.0 1,387 143,887,295 1,931,593.0 cudaStreamSynchronize
247
+ 34.5 9,797,481,802 1,507,570 6,498.9 4,801.0 787 60,705,277 111,719.0 cudaLaunchKernel
248
+ 11.7 3,315,922,316 267,434 12,399.0 9,714.0 7,444 13,005,521 119,290.9 cudaGraphLaunch_v10000
249
+ 5.4 1,523,067,920 103,470 14,719.9 8,395.0 2,778 106,794,386 373,560.2 cudaMemcpyAsync
250
+ 0.8 220,113,686 1,943 113,285.5 73,444.0 40,977 2,057,968 189,715.3 cudaGraphInstantiateWithFlags_v11040
251
+ 0.8 215,892,156 269,735 800.4 768.0 324 15,838 178.2 cudaStreamIsCapturing_v10000
252
+ 0.5 141,444,195 2,135 66,250.2 31,760.0 6,027 72,254,094 1,563,196.6 cudaDeviceSynchronize
253
+ 0.5 137,833,300 38,438 3,585.9 4,058.0 652 155,081 2,294.0 cuLaunchKernel
254
+ 0.2 67,108,465 14,485 4,633.0 5,021.0 183 3,200,351 28,944.6 cudaMemsetAsync
255
+ 0.2 53,001,649 222 238,746.2 117,647.0 63,745 2,285,490 340,113.2 cudaFree
256
+ 0.1 40,868,863 348 117,439.3 109,378.5 5,752 1,125,389 63,264.5 cudaMalloc
257
+ 0.1 24,861,223 10 2,486,122.3 2,553,390.5 56,705 4,320,787 1,404,300.5 cuLibraryLoadData
258
+ 0.1 16,696,302 15,454 1,080.4 655.0 263 4,184,955 35,857.1 cuKernelGetFunction
259
+ 0.0 13,006,871 169 76,963.7 68,902.0 27,343 356,192 46,120.4 cuModuleLoadData
260
+ 0.0 9,079,336 18,895 480.5 453.0 297 8,904 131.4 cudaStreamGetCaptureInfo_v2_v11030
261
+ 0.0 8,354,276 1,943 4,299.7 4,189.0 3,373 20,932 803.4 cudaStreamBeginCapture_v10000
262
+ 0.0 7,675,564 1,943 3,950.4 3,913.0 2,470 13,545 599.9 cudaGraphDestroy_v10000
263
+ 0.0 3,107,178 128 24,274.8 2,264.5 1,433 1,101,304 127,352.9 cudaStreamCreateWithPriority
264
+ 0.0 2,506,159 1,943 1,289.8 1,277.0 976 1,937 112.9 cudaStreamEndCapture_v10000
265
+ 0.0 1,952,904 24 81,371.0 8,307.5 3,591 1,241,446 249,577.9 cudaHostAlloc
266
+ 0.0 1,595,388 1,943 821.1 749.0 636 2,762 252.4 cudaGraphGetNodes_v10000
267
+ 0.0 293,162 98 2,991.4 3,357.0 887 6,560 1,554.2 cudaEventQuery
268
+ 0.0 268,050 99 2,707.6 2,796.0 1,146 7,573 1,146.9 cudaEventRecord
269
+ 0.0 255,527 8 31,940.9 29,099.5 9,678 69,087 22,331.0 cudaMemGetInfo
270
+ 0.0 132,043 810 163.0 131.5 69 1,615 113.9 cuGetProcAddress_v2
271
+ 0.0 22,323 16 1,395.2 921.0 431 4,897 1,232.4 cuLibraryGetKernel
272
+ 0.0 20,937 21 997.0 467.0 277 4,218 1,099.3 cudaEventCreateWithFlags
273
+ 0.0 8,033 14 573.8 570.5 312 918 173.3 cudaThreadExchangeStreamCaptureMode_v10010
274
+ 0.0 4,402 3 1,467.3 1,282.0 1,262 1,858 338.5 cuInit
275
+ 0.0 3,502 1 3,502.0 3,502.0 3,502 3,502 0.0 cudaStreamWaitEvent
276
+ 0.0 2,574 4 643.5 523.0 87 1,441 663.1 cuModuleGetLoadingMode
277
+ 0.0 1,563 1 1,563.0 1,563.0 1,563 1,563 0.0 cudaEventDestroy
278
+ 0.0 918 2 459.0 459.0 270 648 267.3 cudaGetDriverEntryPoint_v11030
279
+
280
+ [6/8] Executing 'cuda_gpu_kern_sum' stats report
281
+
282
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
283
+ -------- --------------- --------- ----------- ----------- --------- --------- ----------- ----------------------------------------------------------------------------------------------------
284
+ 38.9 7,460,281,214 82,205 90,752.2 69,057.0 5,760 316,868 87,744.0 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
285
+ 16.0 3,075,389,176 176,008 17,473.0 9,088.0 6,304 136,609 16,733.1 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
286
+ 8.0 1,542,686,749 3,521 438,138.8 496,805.0 10,624 504,710 155,358.7 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
287
+ 7.1 1,364,722,257 2,113 645,869.5 573,543.0 186,818 763,722 119,005.8 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
288
+ 6.7 1,288,109,699 5,868 219,514.3 86,640.5 7,584 548,071 233,154.4 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
289
+ 4.7 899,528,528 458,577 1,961.6 1,888.0 1,632 3,583 325.8 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
290
+ 2.7 520,608,894 1,025 507,911.1 507,878.0 506,278 509,990 414.6 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x1_tn_align8>(T1::Par…
291
+ 2.6 505,946,595 1,117 452,951.3 488,004.0 7,071 488,933 122,330.3 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
292
+ 1.9 368,335,550 9,223 39,936.6 7,808.0 1,952 1,005,962 67,492.9 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
293
+ 1.8 345,618,817 258,213 1,338.5 1,248.0 992 2,047 191.4 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
294
+ 1.6 316,168,888 9,225 34,273.1 8,224.0 5,152 712,902 62,162.9 void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
295
+ 1.6 309,202,466 229,288 1,348.5 1,312.0 1,183 2,080 171.7 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
296
+ 1.3 250,234,957 71,932 3,478.8 3,488.0 3,231 4,128 128.3 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
297
+ 0.9 179,336,549 57,456 3,121.3 3,135.0 2,943 4,992 78.7 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
298
+ 0.8 157,635,915 924 170,601.6 162,545.5 40,256 1,415,021 225,677.4 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
299
+ 0.7 140,934,798 46,620 3,023.1 3,040.0 2,847 3,232 98.2 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
300
+ 0.4 79,079,082 29 2,726,864.9 2,804,313.0 584,168 2,808,793 412,123.5 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
301
+ 0.2 39,299,362 8 4,912,420.3 4,870,556.0 4,820,171 5,103,438 113,542.1 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
302
+ 0.2 37,974,768 27,663 1,372.8 1,376.0 1,055 2,145 222.9 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
303
+ 0.2 29,065,343 9,357 3,106.3 3,104.0 2,687 7,488 192.9 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
304
+ 0.1 22,668,389 784 28,913.8 12,512.0 11,584 62,529 20,679.1 ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
305
+ 0.1 21,873,260 9,221 2,372.1 2,336.0 1,952 3,008 199.7 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
306
+ 0.1 21,556,364 1,932 11,157.5 4,384.0 1,055 461,252 54,521.7 triton_poi_fused_mul_silu_1
307
+ 0.1 20,487,032 4 5,121,758.0 5,121,598.5 4,943,820 5,300,015 200,903.2 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
308
+ 0.1 14,291,746 28 510,419.5 511,797.0 469,540 512,996 8,022.2 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
309
+ 0.1 13,279,798 142 93,519.7 51,680.0 50,400 3,040,379 351,298.3 ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
310
+ 0.1 10,969,719 9,223 1,189.4 1,152.0 1,023 1,760 134.6 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
311
+ 0.1 10,416,865 12,059 863.8 895.0 767 1,280 54.5 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
312
+ 0.1 10,035,051 1,932 5,194.1 3,455.5 1,568 112,066 12,675.8 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
313
+ 0.1 9,775,446 4 2,443,861.5 2,443,525.5 2,389,237 2,499,158 60,370.1 void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
314
+ 0.0 9,151,987 28 326,856.7 327,058.5 324,132 331,139 1,248.5 ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
315
+ 0.0 8,406,218 224 37,527.8 37,536.0 36,608 38,752 441.5 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
316
+ 0.0 8,365,002 9,222 907.1 896.0 895 993 15.6 void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
317
+ 0.0 8,036,497 9,222 871.4 864.0 831 960 17.1 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
318
+ 0.0 7,799,398 2 3,899,699.0 3,899,699.0 3,708,001 4,091,397 271,101.9 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
319
+ 0.0 7,231,222 336 21,521.5 21,440.0 21,088 22,752 357.7 ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
320
+ 0.0 6,510,065 7,157 909.6 895.0 832 2,080 79.6 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
321
+ 0.0 6,285,367 1,932 3,253.3 2,017.0 1,535 78,849 9,097.9 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
322
+ 0.0 6,109,827 476 12,835.8 12,832.0 11,584 14,368 733.3 ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
323
+ 0.0 5,896,822 4 1,474,205.5 1,473,277.0 1,473,006 1,477,262 2,045.1 void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
324
+ 0.0 4,385,449 140 31,324.6 27,232.0 26,080 50,113 8,339.5 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
325
+ 0.0 4,015,431 1,863 2,155.4 1,823.0 1,375 22,753 2,300.3 triton_poi_fused_cat_3
326
+ 0.0 3,999,332 2 1,999,666.0 1,999,666.0 1,997,394 2,001,938 3,213.1 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
327
+ 0.0 3,594,888 56 64,194.4 64,161.0 63,041 65,313 437.2 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
328
+ 0.0 3,434,878 4 858,719.5 857,975.5 856,167 862,760 3,016.9 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
329
+ 0.0 3,192,956 2 1,596,478.0 1,596,478.0 1,565,902 1,627,054 43,241.0 void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
330
+ 0.0 2,856,853 1,512 1,889.5 1,712.0 1,311 2,944 453.7 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
331
+ 0.0 2,644,615 1,863 1,419.5 1,151.0 863 16,640 1,781.0 triton_poi_fused_view_5
332
+ 0.0 2,613,678 1,863 1,402.9 1,344.0 1,215 6,624 562.0 triton_poi_fused_cat_4
333
+ 0.0 2,581,335 2 1,290,667.5 1,290,667.5 1,290,379 1,290,956 408.0 at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
334
+ 0.0 2,579,032 2 1,289,516.0 1,289,516.0 1,287,756 1,291,276 2,489.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
335
+ 0.0 2,426,037 112 21,661.0 21,568.0 9,473 34,721 12,050.1 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
336
+ 0.0 1,840,620 2,069 889.6 896.0 863 960 16.1 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
337
+ 0.0 1,371,084 2 685,542.0 685,542.0 680,966 690,118 6,471.4 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
338
+ 0.0 1,149,932 28 41,069.0 41,072.0 40,449 43,297 529.2 ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
339
+ 0.0 962,005 1,054 912.7 897.0 895 992 18.8 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<long>, std::array<char *, (uns…
340
+ 0.0 957,484 28 34,195.9 34,848.5 18,177 35,297 3,147.6 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
341
+ 0.0 295,678 168 1,760.0 1,760.0 1,535 1,984 116.8 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
342
+ 0.0 254,179 69 3,683.8 3,040.0 2,016 36,896 4,118.3 triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
343
+ 0.0 186,527 69 2,703.3 2,080.0 1,600 39,296 4,485.1 triton_poi_fused_cat_1
344
+ 0.0 155,234 1 155,234.0 155,234.0 155,234 155,234 0.0 void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
345
+ 0.0 99,648 69 1,444.2 1,344.0 1,216 8,704 890.0 triton_poi_fused_cat_2
346
+ 0.0 97,824 69 1,417.7 1,184.0 864 14,337 1,605.4 triton_poi_fused_view_3
347
+ 0.0 79,521 1 79,521.0 79,521.0 79,521 79,521 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
348
+ 0.0 64,605 58 1,113.9 896.0 895 11,328 1,368.2 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
349
+ 0.0 44,704 1 44,704.0 44,704.0 44,704 44,704 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
350
+ 0.0 36,892 28 1,317.6 1,312.0 1,280 1,375 23.0 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
351
+ 0.0 26,657 1 26,657.0 26,657.0 26,657 26,657 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
352
+ 0.0 19,520 1 19,520.0 19,520.0 19,520 19,520 0.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
353
+ 0.0 11,903 11 1,082.1 865.0 863 1,567 288.8 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
354
+ 0.0 11,009 2 5,504.5 5,504.5 5,472 5,537 46.0 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
355
+ 0.0 9,217 2 4,608.5 4,608.5 4,545 4,672 89.8 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
356
+ 0.0 3,809 2 1,904.5 1,904.5 1,665 2,144 338.7 void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
357
+ 0.0 3,616 2 1,808.0 1,808.0 1,600 2,016 294.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
358
+ 0.0 3,232 2 1,616.0 1,616.0 1,504 1,728 158.4 void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
359
+ 0.0 3,103 2 1,551.5 1,551.5 1,120 1,983 610.2 void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
360
+ 0.0 2,913 2 1,456.5 1,456.5 1,345 1,568 157.7 void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
361
+ 0.0 2,880 2 1,440.0 1,440.0 1,312 1,568 181.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
362
+ 0.0 2,304 1 2,304.0 2,304.0 2,304 2,304 0.0 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
363
+ 0.0 1,216 1 1,216.0 1,216.0 1,216 1,216 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
364
+ 0.0 1,025 1 1,025.0 1,025.0 1,025 1,025 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
365
+ 0.0 992 1 992.0 992.0 992 992 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
366
+ 0.0 895 1 895.0 895.0 895 895 0.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
367
+
368
+ [7/8] Executing 'cuda_gpu_mem_time_sum' stats report
369
+
370
+ Time (%) Total Time (ns) Count Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Operation
371
+ -------- --------------- ------ -------- -------- -------- ----------- ----------- ------------------------------
372
+ 91.9 532,693,904 65,321 8,155.0 352.0 287 106,336,118 441,449.3 [CUDA memcpy Host-to-Device]
373
+ 5.5 31,855,936 28,928 1,101.2 897.0 864 1,362,988 15,981.0 [CUDA memcpy Device-to-Device]
374
+ 1.8 10,196,329 9,221 1,105.8 1,120.0 863 1,696 90.0 [CUDA memcpy Device-to-Host]
375
+ 0.8 4,628,573 11,573 399.9 352.0 288 2,240 166.8 [CUDA memset]
376
+
377
+ [8/8] Executing 'cuda_gpu_mem_size_sum' stats report
378
+
379
+ Total (MB) Count Avg (MB) Med (MB) Min (MB) Max (MB) StdDev (MB) Operation
380
+ ---------- ------ -------- -------- -------- -------- ----------- ------------------------------
381
+ 3,634.332 65,321 0.056 0.000 0.000 466.747 2.080 [CUDA memcpy Host-to-Device]
382
+ 2,578.174 28,928 0.089 0.003 0.003 622.330 7.318 [CUDA memcpy Device-to-Device]
383
+ 4.048 11,573 0.000 0.000 0.000 0.006 0.001 [CUDA memset]
384
+ 2.077 9,221 0.000 0.000 0.000 0.001 0.000 [CUDA memcpy Device-to-Host]
385
+
386
+ Generated:
387
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding_1024.nsys-rep
388
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding_1024.sqlite
sim_traverse_bs/traverse_bs_util_sim_decoding_1024.nsys-rep ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:076aaf8297b058dc3a50d9858116d1dbaa2dbe038c6ca5881b0edb7f2fb3d314
3
+ size 177488267
sim_traverse_bs/traverse_bs_util_sim_prefill.log ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ WARNING: CPU IP/backtrace sampling not supported, disabling.
2
+ Try the 'nsys status --environment' command to learn more.
3
+
4
+ WARNING: CPU context switch tracing not supported, disabling.
5
+ Try the 'nsys status --environment' command to learn more.
6
+
7
+ INFO 08-13 20:36:20 [__init__.py:235] Automatically detected platform cuda.
8
+ CUDA_VISIBLE_DEVICES = 3
9
+ --- vLLM V1 基准测试(含 NVTX 标记)---
10
+ 模型: Qwen/Qwen2-1.5B
11
+ 批量大小: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
12
+ 场景: ['prefill640_decode1']
13
+ ------------------------------------------------------------
14
+ 加载分词器/模型中...
15
+ INFO 08-13 20:36:30 [config.py:1604] Using max model len 4096
16
+ INFO 08-13 20:36:30 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
17
+ INFO 08-13 20:36:35 [__init__.py:235] Automatically detected platform cuda.
18
+ INFO 08-13 20:36:37 [core.py:572] Waiting for init message from front-end.
19
+ INFO 08-13 20:36:37 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
20
+ INFO 08-13 20:36:39 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
21
+ WARNING 08-13 20:36:39 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
22
+ INFO 08-13 20:36:39 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
23
+ INFO 08-13 20:36:39 [gpu_model_runner.py:1875] Loading model from scratch...
24
+ INFO 08-13 20:36:39 [cuda.py:290] Using Flash Attention backend on V1 engine.
25
+ INFO 08-13 20:36:40 [weight_utils.py:296] Using model weights format ['*.safetensors']
26
+ INFO 08-13 20:36:40 [weight_utils.py:349] No model.safetensors.index.json found in remote.
27
+
28
+
29
+
30
+
31
+ INFO 08-13 20:36:41 [default_loader.py:262] Loading weights took 0.74 seconds
32
+ INFO 08-13 20:36:42 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 2.308288 seconds
33
+ INFO 08-13 20:36:48 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
34
+ INFO 08-13 20:36:48 [backends.py:541] Dynamo bytecode transform time: 6.35 s
35
+ INFO 08-13 20:36:53 [backends.py:161] Directly load the compiled graph(s) for dynamic shape from the cache, took 3.954 s
36
+ INFO 08-13 20:36:54 [monitor.py:34] torch.compile takes 6.35 s in total
37
+ INFO 08-13 20:36:54 [gpu_worker.py:255] Available KV cache memory: 12.81 GiB
38
+ INFO 08-13 20:36:55 [kv_cache_utils.py:833] GPU KV cache size: 479,536 tokens
39
+ INFO 08-13 20:36:55 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.07x
40
+
41
+ INFO 08-13 20:36:57 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
42
+ INFO 08-13 20:36:57 [core.py:193] init engine (profile, create kv cache, warmup model) took 15.10 seconds
43
+ 模型加载完成。
44
+
45
+ ===== 场景:prefill640_decode1 | prefill=640, decode=1 =====
46
+
47
+ --- 批量大小 bs=1 ---
48
+ 预热中...
49
+
50
+
51
+
52
+
53
+ 执行时间: 0.0106 s
54
+ 实际平均输入 tokens: 640.00(目标 640)
55
+ 生成总 tokens: 1
56
+ 吞吐(生成tokens/秒): 94.66
57
+ TTFT (V1 metrics): 0.0098 s
58
+ 解码吞吐 (V1 metrics): nan tok/s
59
+
60
+ --- 批量大小 bs=2 ---
61
+
62
+
63
+ 执行时间: 0.0193 s
64
+ 实际平均输入 tokens: 640.00(目标 640)
65
+ 生成总 tokens: 2
66
+ 吞吐(生成tokens/秒): 103.58
67
+ TTFT (V1 metrics): 0.0140 s
68
+ 解码吞吐 (V1 metrics): nan tok/s
69
+
70
+ --- 批量大小 bs=4 ---
71
+
72
+
73
+ 执行时间: 0.0242 s
74
+ 实际平均输入 tokens: 640.00(目标 640)
75
+ 生成总 tokens: 4
76
+ 吞吐(生成tokens/秒): 165.24
77
+ TTFT (V1 metrics): 0.0194 s
78
+ 解码吞吐 (V1 metrics): nan tok/s
79
+
80
+ --- 批量大小 bs=8 ---
81
+
82
+
83
+ 执行时间: 0.0370 s
84
+ 实际平均输入 tokens: 640.00(目标 640)
85
+ 生成总 tokens: 8
86
+ 吞吐(生成tokens/秒): 216.13
87
+ TTFT (V1 metrics): 0.0202 s
88
+ 解码吞吐 (V1 metrics): nan tok/s
89
+
90
+ --- 批量大小 bs=16 ---
91
+
92
+
93
+ 执行时间: 0.0466 s
94
+ 实际平均输入 tokens: 640.00(目标 640)
95
+ 生成总 tokens: 16
96
+ 吞吐(生成tokens/秒): 343.63
97
+ TTFT (V1 metrics): 0.0267 s
98
+ 解码吞吐 (V1 metrics): nan tok/s
99
+
100
+ --- 批量大小 bs=32 ---
101
+
102
+
103
+ 执行时间: 0.0620 s
104
+ 实际平均输入 tokens: 640.00(目标 640)
105
+ 生成总 tokens: 32
106
+ 吞吐(生成tokens/秒): 516.16
107
+ TTFT (V1 metrics): 0.0279 s
108
+ 解码吞吐 (V1 metrics): nan tok/s
109
+
110
+ --- 批量大小 bs=64 ---
111
+
112
+
113
+ 执行时间: 0.1069 s
114
+ 实际平均输入 tokens: 640.00(目标 640)
115
+ 生成总 tokens: 64
116
+ 吞吐(生成tokens/秒): 598.81
117
+ TTFT (V1 metrics): 0.0496 s
118
+ 解码吞吐 (V1 metrics): nan tok/s
119
+
120
+ --- 批量大小 bs=128 ---
121
+
122
+
123
+ 执行时间: 0.1979 s
124
+ 实际平均输入 tokens: 640.00(目标 640)
125
+ 生成总 tokens: 128
126
+ 吞吐(生成tokens/秒): 646.67
127
+ TTFT (V1 metrics): 0.0973 s
128
+ 解码吞吐 (V1 metrics): nan tok/s
129
+
130
+ --- 批量大小 bs=256 ---
131
+
132
+
133
+ 执行时间: 0.3878 s
134
+ 实际平均输入 tokens: 640.00(目标 640)
135
+ 生成总 tokens: 256
136
+ 吞吐(生成tokens/秒): 660.13
137
+ TTFT (V1 metrics): 0.1947 s
138
+ 解码吞吐 (V1 metrics): nan tok/s
139
+
140
+ --- 批量大小 bs=512 ---
141
+
142
+
143
+ 执行时间: 0.9331 s
144
+ 实际平均输入 tokens: 640.00(目标 640)
145
+ 生成总 tokens: 512
146
+ 吞吐(生成tokens/秒): 548.69
147
+ TTFT (V1 metrics): 0.3830 s
148
+ 解码吞吐 (V1 metrics): nan tok/s
149
+
150
+ --- 批量大小 bs=1024 ---
151
+
152
+
153
+ [rank0]:[W813 20:37:02.968083070 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
154
+ 执行时间: 1.4194 s
155
+ 实际平均输入 tokens: 640.00(目标 640)
156
+ 生成总 tokens: 1024
157
+ 吞吐(生成tokens/秒): 721.42
158
+ TTFT (V1 metrics): 0.7078 s
159
+ 解码吞吐 (V1 metrics): nan tok/s
160
+
161
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
162
+ GPU 3: General Metrics for NVIDIA AD10x (any frequency)
163
+ Generating '/tmp/nsys-report-97f6.qdstrm'
164
+
165
+
166
+ [3/8] Executing 'nvtx_sum' stats report
167
+
168
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Style Range
169
+ -------- --------------- --------- ---------------- ---------------- -------------- -------------- ----------- ------- --------------------------------------
170
+ 91.5 34,993,424,316 1 34,993,424,316.0 34,993,424,316.0 34,993,424,316 34,993,424,316 0.0 PushPop :LLM_init
171
+ 3.7 1,419,326,629 1 1,419,326,629.0 1,419,326,629.0 1,419,326,629 1,419,326,629 0.0 PushPop :generate [prefill640_decode1] bs=1024
172
+ 2.4 933,046,428 1 933,046,428.0 933,046,428.0 933,046,428 933,046,428 0.0 PushPop :generate [prefill640_decode1] bs=512
173
+ 1.0 387,636,888 1 387,636,888.0 387,636,888.0 387,636,888 387,636,888 0.0 PushPop :generate [prefill640_decode1] bs=256
174
+ 0.5 197,863,320 1 197,863,320.0 197,863,320.0 197,863,320 197,863,320 0.0 PushPop :generate [prefill640_decode1] bs=128
175
+ 0.3 106,809,096 1 106,809,096.0 106,809,096.0 106,809,096 106,809,096 0.0 PushPop :generate [prefill640_decode1] bs=64
176
+ 0.2 61,927,285 1 61,927,285.0 61,927,285.0 61,927,285 61,927,285 0.0 PushPop :generate [prefill640_decode1] bs=32
177
+ 0.1 46,494,927 1 46,494,927.0 46,494,927.0 46,494,927 46,494,927 0.0 PushPop :generate [prefill640_decode1] bs=16
178
+ 0.1 36,948,348 1 36,948,348.0 36,948,348.0 36,948,348 36,948,348 0.0 PushPop :generate [prefill640_decode1] bs=8
179
+ 0.1 24,170,392 1 24,170,392.0 24,170,392.0 24,170,392 24,170,392 0.0 PushPop :generate [prefill640_decode1] bs=4
180
+ 0.1 19,267,324 1 19,267,324.0 19,267,324.0 19,267,324 19,267,324 0.0 PushPop :generate [prefill640_decode1] bs=2
181
+ 0.0 10,500,291 1 10,500,291.0 10,500,291.0 10,500,291 10,500,291 0.0 PushPop :generate [prefill640_decode1] bs=1
182
+ 0.0 99,802 2 49,901.0 49,901.0 44,120 55,682 8,175.6 PushPop CCCL:cub::DeviceSegmentedRadixSort
183
+
184
+ [4/8] Executing 'osrt_sum' stats report
185
+
186
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
187
+ -------- --------------- --------- ---------------- ------------ --------- -------------- ---------------- ----------------------
188
+ 31.9 315,906,533,849 13,750 22,975,020.6 26,070.5 1,015 23,666,668,799 454,327,983.2 pthread_cond_timedwait
189
+ 26.2 259,640,374,556 18,725 13,865,974.6 10,062,170.0 1,054 26,101,659,008 334,515,177.4 epoll_wait
190
+ 23.8 236,353,668,764 22 10,743,348,580.2 8,483,110.0 20,022 23,667,768,753 12,042,371,346.2 pthread_cond_wait
191
+ 7.8 77,638,705,690 9,817 7,908,597.9 1,366.0 1,000 10,010,042,018 161,807,052.9 poll
192
+ 6.1 60,833,119,112 28 2,172,611,396.9 27,845.0 9,196 10,000,124,017 3,889,257,455.2 sem_timedwait
193
+ 3.6 35,208,374,909 31,030 1,134,656.0 2,282.0 1,000 21,397,007,347 131,447,597.0 read
194
+ 0.4 3,887,231,292 311 12,499,136.0 7,213,077.0 36,367 322,090,803 29,803,312.7 sem_wait
195
+ 0.0 474,955,889 193,339 2,456.6 1,418.0 1,001 94,954,678 215,965.8 munmap
196
+ 0.0 265,992,610 8,412 31,620.6 9,835.5 1,001 17,044,888 308,579.4 ioctl
197
+ 0.0 222,714,707 369 603,562.9 2,105.0 1,033 20,700,195 3,290,651.8 fopen
198
+ 0.0 121,534,035 24 5,063,918.1 5,063,775.0 5,040,945 5,076,986 7,972.2 nanosleep
199
+ 0.0 81,450,488 30,629 2,659.3 2,313.0 1,000 92,932 1,646.5 open64
200
+ 0.0 74,702,014 133 561,669.3 1,892.0 1,000 66,577,673 5,774,257.5 waitpid
201
+ 0.0 74,234,231 96 773,273.2 3,712.0 1,003 22,285,364 3,739,689.4 open
202
+ 0.0 66,582,083 37 1,799,515.8 480,507.0 3,919 10,336,124 3,413,414.9 pthread_join
203
+ 0.0 63,806,761 10 6,380,676.1 20,040.5 8,233 63,627,951 20,114,646.1 connect
204
+ 0.0 46,888,026 12,161 3,855.6 2,570.0 1,000 1,071,189 14,531.9 mmap64
205
+ 0.0 32,641,092 147 222,048.2 68,846.0 55,054 7,991,497 1,073,129.2 sleep
206
+ 0.0 22,603,585 11,001 2,054.7 1,785.0 1,000 53,255 2,018.4 pthread_cond_signal
207
+ 0.0 17,553,779 2,362 7,431.7 4,197.0 1,033 2,743,900 58,994.9 recv
208
+ 0.0 15,451,862 2,365 6,533.6 5,474.0 1,382 75,703 5,847.3 send
209
+ 0.0 14,269,726 1,497 9,532.2 7,368.0 1,000 543,140 16,462.6 pthread_mutex_lock
210
+ 0.0 13,536,775 4,653 2,909.3 2,179.0 1,006 121,272 4,763.9 write
211
+ 0.0 6,900,801 131 52,677.9 46,855.0 21,276 130,729 19,309.4 pthread_create
212
+ 0.0 5,993,355 4,277 1,401.3 1,350.0 1,000 17,968 515.6 epoll_ctl
213
+ 0.0 5,355,828 887 6,038.1 3,980.0 1,016 43,090 7,169.9 fgets
214
+ 0.0 1,980,565 344 5,757.5 5,562.5 2,076 45,811 2,514.7 fopen64
215
+ 0.0 1,764,653 1,470 1,200.4 1,068.0 1,000 9,187 564.1 fclose
216
+ 0.0 1,642,199 16 102,637.4 81,769.0 3,412 331,583 101,402.9 pthread_rwlock_wrlock
217
+ 0.0 1,173,080 195 6,015.8 3,724.0 1,150 124,348 12,102.3 mmap
218
+ 0.0 1,159,048 61 19,000.8 3,157.0 1,938 319,207 49,006.4 futex
219
+ 0.0 984,289 1 984,289.0 984,289.0 984,289 984,289 0.0 fork
220
+ 0.0 778,197 9 86,466.3 47,775.0 4,586 209,086 76,949.1 pthread_rwlock_rdlock
221
+ 0.0 360,142 65 5,540.6 4,626.0 1,914 16,148 2,939.0 pipe2
222
+ 0.0 236,233 41 5,761.8 4,572.0 2,034 12,662 3,154.3 socket
223
+ 0.0 164,351 25 6,574.0 2,657.0 1,052 62,060 13,072.9 bind
224
+ 0.0 112,444 29 3,877.4 3,400.0 1,217 10,296 2,247.2 pthread_cond_broadcast
225
+ 0.0 69,279 7 9,897.0 7,229.0 3,601 30,113 9,209.7 fread
226
+ 0.0 42,530 5 8,506.0 7,533.0 3,596 14,826 4,155.8 accept4
227
+ 0.0 42,447 27 1,572.1 1,691.0 1,025 1,981 319.5 sigaction
228
+ 0.0 38,331 20 1,916.6 1,877.5 1,040 2,780 659.7 dup2
229
+ 0.0 37,721 23 1,640.0 1,115.0 1,012 5,412 1,165.0 fcntl
230
+ 0.0 37,457 15 2,497.1 2,022.0 1,236 5,421 1,128.1 stat
231
+ 0.0 26,843 8 3,355.4 3,101.5 1,029 7,199 2,500.2 fflush
232
+ 0.0 26,508 10 2,650.8 1,797.0 1,023 7,028 1,852.3 pread
233
+ 0.0 19,153 4 4,788.3 5,290.5 3,065 5,507 1,166.3 lstat
234
+ 0.0 18,990 5 3,798.0 3,258.0 1,533 8,332 2,664.1 fwrite
235
+ 0.0 14,221 4 3,555.3 3,514.0 1,783 5,410 1,691.6 flock
236
+ 0.0 14,049 3 4,683.0 4,386.0 4,334 5,329 560.1 fputs_unlocked
237
+ 0.0 13,821 5 2,764.2 2,680.0 2,269 3,685 568.1 mprotect
238
+ 0.0 12,045 7 1,720.7 1,457.0 1,329 2,524 461.1 listen
239
+ 0.0 9,226 3 3,075.3 2,227.0 1,736 5,263 1,910.4 fstat
240
+ 0.0 7,086 3 2,362.0 1,954.0 1,531 3,601 1,093.7 flockfile
241
+ 0.0 5,765 1 5,765.0 5,765.0 5,765 5,765 0.0 kill
242
+ 0.0 4,207 1 4,207.0 4,207.0 4,207 4,207 0.0 fputs
243
+ 0.0 3,713 2 1,856.5 1,856.5 1,397 2,316 649.8 openat64
244
+ 0.0 1,334 1 1,334.0 1,334.0 1,334 1,334 0.0 pthread_mutex_trylock
245
+
246
+ [5/8] Executing 'cuda_api_sum' stats report
247
+
248
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
249
+ -------- --------------- --------- ----------- ----------- -------- ----------- ----------- ------------------------------------------
250
+ 31.1 698,997,620 2,853 245,004.4 8,462.0 3,452 100,828,816 2,074,316.5 cudaMemcpyAsync
251
+ 25.2 565,993,891 37,122 15,246.9 7,556.0 745 58,676,993 479,701.4 cudaLaunchKernel
252
+ 9.5 214,098,539 1,943 110,189.7 70,984.0 41,015 1,466,127 183,798.1 cudaGraphInstantiateWithFlags_v11040
253
+ 9.0 203,147,257 1,162 174,825.5 3,666.0 1,917 144,202,830 4,251,626.0 cudaStreamSynchronize
254
+ 6.1 136,018,801 2,136 63,679.2 30,541.5 2,774 71,669,241 1,550,140.5 cudaDeviceSynchronize
255
+ 5.7 127,417,770 7,569 16,834.2 15,015.0 9,892 226,168 6,274.9 cudaGraphLaunch_v10000
256
+ 4.6 102,280,934 32,723 3,125.7 3,725.0 617 163,047 2,253.8 cuLaunchKernel
257
+ 2.4 54,199,644 222 244,142.5 119,315.0 63,972 2,400,814 360,610.3 cudaFree
258
+ 1.9 41,686,022 348 119,787.4 110,186.0 4,423 1,038,043 58,088.4 cudaMalloc
259
+ 1.1 24,826,279 10 2,482,627.9 2,644,171.0 54,814 4,281,423 1,392,790.3 cuLibraryLoadData
260
+ 0.7 15,112,641 5,582 2,707.4 1,648.5 157 37,305 2,202.8 cudaMemsetAsync
261
+ 0.5 11,339,187 169 67,095.8 76,671.0 26,797 270,701 31,403.2 cuModuleLoadData
262
+ 0.5 10,277,547 9,570 1,073.9 392.0 261 4,171,570 44,224.2 cuKernelGetFunction
263
+ 0.4 9,823,524 9,870 995.3 941.0 307 9,908 277.0 cudaStreamIsCapturing_v10000
264
+ 0.4 9,016,831 18,895 477.2 454.0 307 6,000 104.4 cudaStreamGetCaptureInfo_v2_v11030
265
+ 0.3 7,557,078 1,943 3,889.4 3,805.0 2,935 11,648 648.9 cudaStreamBeginCapture_v10000
266
+ 0.3 7,189,458 1,943 3,700.2 3,649.0 2,354 11,574 558.8 cudaGraphDestroy_v10000
267
+ 0.1 2,831,657 128 22,122.3 2,001.0 1,222 925,085 116,327.5 cudaStreamCreateWithPriority
268
+ 0.1 2,563,056 1,943 1,319.1 1,298.0 1,039 7,104 215.9 cudaStreamEndCapture_v10000
269
+ 0.1 1,525,954 1,943 785.4 713.0 606 8,744 295.8 cudaGraphGetNodes_v10000
270
+ 0.1 1,502,447 14 107,317.6 5,457.0 4,455 1,421,402 378,224.4 cudaHostAlloc
271
+ 0.0 232,971 8 29,121.4 27,970.5 12,623 64,091 17,478.2 cudaMemGetInfo
272
+ 0.0 138,843 810 171.4 142.0 80 1,544 110.5 cuGetProcAddress_v2
273
+ 0.0 18,339 16 1,146.2 740.5 378 6,387 1,432.6 cuLibraryGetKernel
274
+ 0.0 16,302 19 858.0 353.0 278 4,451 1,090.8 cudaEventCreateWithFlags
275
+ 0.0 8,126 14 580.4 559.5 305 1,104 204.3 cudaThreadExchangeStreamCaptureMode_v10010
276
+ 0.0 4,845 1 4,845.0 4,845.0 4,845 4,845 0.0 cudaEventRecord
277
+ 0.0 4,537 3 1,512.3 1,306.0 1,229 2,002 425.8 cuInit
278
+ 0.0 3,158 1 3,158.0 3,158.0 3,158 3,158 0.0 cudaStreamWaitEvent
279
+ 0.0 2,649 4 662.3 662.5 142 1,182 567.7 cuModuleGetLoadingMode
280
+ 0.0 1,385 1 1,385.0 1,385.0 1,385 1,385 0.0 cudaEventDestroy
281
+ 0.0 1,056 2 528.0 528.0 260 796 379.0 cudaGetDriverEntryPoint_v11030
282
+
283
+ [6/8] Executing 'cuda_gpu_kern_sum' stats report
284
+
285
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
286
+ -------- --------------- --------- ----------- ----------- --------- --------- ----------- ----------------------------------------------------------------------------------------------------
287
+ 17.7 192,070,994 3,840 50,018.5 22,048.5 7,616 540,899 42,110.2 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
288
+ 14.5 157,599,907 924 170,562.7 162,689.0 39,968 1,415,303 225,612.7 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
289
+ 12.7 137,760,865 689 199,943.2 60,640.0 10,688 521,827 229,882.5 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
290
+ 12.0 130,433,090 7,336 17,779.9 22,656.0 6,848 25,440 6,404.2 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
291
+ 7.2 78,491,480 28 2,803,267.1 2,805,165.5 2,788,142 2,808,782 5,107.2 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
292
+ 6.4 69,018,876 3,220 21,434.4 21,376.0 21,280 23,456 199.8 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
293
+ 3.6 39,090,744 8 4,886,343.0 4,840,279.0 4,794,230 5,090,744 113,470.9 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
294
+ 3.0 32,798,268 140 234,273.3 229,890.0 122,272 377,250 83,663.6 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
295
+ 2.1 22,635,863 784 28,872.3 12,480.0 11,584 62,177 20,637.7 ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
296
+ 2.0 21,990,540 1,960 11,219.7 4,416.5 1,055 461,410 54,161.7 triton_poi_fused_mul_silu_1
297
+ 1.9 20,356,642 4 5,089,160.5 5,082,872.5 4,909,239 5,281,658 194,105.4 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
298
+ 1.3 14,291,715 28 510,418.4 511,890.5 469,186 513,250 8,099.5 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
299
+ 1.2 12,995,135 142 91,515.0 51,536.5 50,401 3,036,431 336,087.1 ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
300
+ 1.0 11,064,209 7,336 1,508.2 1,504.0 1,023 2,304 145.4 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
301
+ 0.9 10,243,687 1,960 5,226.4 3,488.0 1,504 111,552 12,600.6 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
302
+ 0.9 9,917,616 100 99,176.2 8,896.0 7,008 499,075 172,432.9 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
303
+ 0.9 9,765,487 4 2,441,371.8 2,442,108.0 2,393,515 2,487,756 51,684.9 void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
304
+ 0.8 9,150,993 28 326,821.2 326,786.0 324,833 330,529 1,012.2 ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
305
+ 0.8 8,402,949 224 37,513.2 37,504.0 36,417 38,720 433.0 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
306
+ 0.7 7,801,348 2 3,900,674.0 3,900,674.0 3,710,257 4,091,091 269,290.3 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
307
+ 0.7 7,230,573 336 21,519.6 21,440.0 21,056 22,592 353.9 ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
308
+ 0.6 6,377,065 1,960 3,253.6 2,047.0 1,535 79,073 9,036.2 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
309
+ 0.6 6,104,261 476 12,824.1 12,800.0 11,648 14,688 734.1 ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
310
+ 0.5 5,896,026 4 1,474,006.5 1,473,894.0 1,473,351 1,474,887 642.5 void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
311
+ 0.5 5,005,878 3,220 1,554.6 1,537.0 1,471 1,792 48.7 void vllm::merge_attn_states_kernel<__nv_bfloat16, (unsigned int)128>(T1 *, float *, const T1 *, co…
312
+ 0.4 4,533,198 266 17,042.1 6,783.0 5,376 713,379 86,005.8 void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
313
+ 0.4 4,394,579 140 31,389.9 27,296.0 26,272 49,792 8,301.1 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
314
+ 0.4 4,086,818 1,890 2,162.3 1,824.0 1,344 22,016 2,254.8 triton_poi_fused_cat_3
315
+ 0.4 3,998,356 2 1,999,178.0 1,999,178.0 1,997,546 2,000,810 2,308.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
316
+ 0.3 3,593,749 56 64,174.1 64,144.0 63,169 65,280 415.2 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
317
+ 0.3 3,432,593 4 858,148.3 858,260.0 855,589 860,484 2,519.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
318
+ 0.3 3,410,108 4,153 821.1 800.0 767 1,280 73.4 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
319
+ 0.3 3,365,160 264 12,746.8 5,088.0 2,111 1,005,636 86,907.4 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
320
+ 0.3 3,200,784 2 1,600,392.0 1,600,392.0 1,565,032 1,635,752 50,006.6 void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
321
+ 0.3 2,854,457 1,512 1,887.9 1,728.0 1,311 2,944 453.9 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
322
+ 0.2 2,697,774 1,890 1,427.4 1,152.0 863 16,704 1,774.4 triton_poi_fused_view_5
323
+ 0.2 2,650,361 1,890 1,402.3 1,344.0 1,215 6,688 549.7 triton_poi_fused_cat_4
324
+ 0.2 2,583,787 2 1,291,893.5 1,291,893.5 1,291,526 1,292,261 519.7 at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
325
+ 0.2 2,581,068 2 1,290,534.0 1,290,534.0 1,289,766 1,291,302 1,086.1 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
326
+ 0.2 2,423,217 112 21,635.9 21,648.0 9,472 34,464 12,027.3 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
327
+ 0.1 1,370,950 2 685,475.0 685,475.0 672,835 698,115 17,875.7 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
328
+ 0.1 1,299,525 398 3,265.1 3,072.0 2,848 7,488 437.7 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
329
+ 0.1 1,149,285 28 41,045.9 41,024.0 40,288 42,689 427.9 ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
330
+ 0.1 956,389 28 34,156.8 34,767.5 17,888 35,200 3,196.0 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
331
+ 0.1 653,123 28 23,325.8 23,263.5 23,009 24,224 280.2 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
332
+ 0.0 523,203 1 523,203.0 523,203.0 523,203 523,203 0.0 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x1_tn_align8>(T1::Par…
333
+ 0.0 365,189 262 1,393.9 1,408.0 1,183 1,792 172.1 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
334
+ 0.0 331,960 264 1,257.4 1,248.0 1,120 1,697 54.2 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
335
+ 0.0 295,934 168 1,761.5 1,760.0 1,536 2,048 122.7 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
336
+ 0.0 258,083 70 3,686.9 3,040.0 1,984 36,192 4,006.4 triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
337
+ 0.0 239,642 262 914.7 896.0 895 993 29.3 void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
338
+ 0.0 232,625 262 887.9 865.0 863 960 30.4 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
339
+ 0.0 192,230 70 2,746.1 2,112.0 1,632 40,192 4,554.3 triton_poi_fused_cat_1
340
+ 0.0 157,665 1 157,665.0 157,665.0 157,665 157,665 0.0 void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
341
+ 0.0 143,357 159 901.6 896.0 863 992 29.4 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
342
+ 0.0 100,929 70 1,441.8 1,344.0 1,216 8,928 910.7 triton_poi_fused_cat_2
343
+ 0.0 99,267 70 1,418.1 1,184.5 864 13,728 1,523.5 triton_poi_fused_view_3
344
+ 0.0 95,967 107 896.9 896.0 863 1,824 94.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
345
+ 0.0 79,489 1 79,489.0 79,489.0 79,489 79,489 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
346
+ 0.0 64,187 58 1,106.7 896.0 865 11,328 1,367.7 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
347
+ 0.0 43,840 1 43,840.0 43,840.0 43,840 43,840 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
348
+ 0.0 36,673 28 1,309.8 1,312.0 1,280 1,345 15.0 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
349
+ 0.0 26,400 1 26,400.0 26,400.0 26,400 26,400 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
350
+ 0.0 19,360 1 19,360.0 19,360.0 19,360 19,360 0.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
351
+ 0.0 11,484 11 1,044.0 895.0 767 1,567 287.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
352
+ 0.0 10,848 2 5,424.0 5,424.0 5,216 5,632 294.2 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
353
+ 0.0 8,705 2 4,352.5 4,352.5 4,160 4,545 272.2 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
354
+ 0.0 3,648 2 1,824.0 1,824.0 1,632 2,016 271.5 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
355
+ 0.0 3,455 2 1,727.5 1,727.5 1,695 1,760 46.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
356
+ 0.0 3,135 2 1,567.5 1,567.5 1,503 1,632 91.2 void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
357
+ 0.0 3,104 2 1,552.0 1,552.0 1,344 1,760 294.2 void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
358
+ 0.0 3,040 2 1,520.0 1,520.0 1,440 1,600 113.1 void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
359
+ 0.0 2,975 2 1,487.5 1,487.5 992 1,983 700.7 void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
360
+ 0.0 2,336 1 2,336.0 2,336.0 2,336 2,336 0.0 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
361
+ 0.0 1,184 1 1,184.0 1,184.0 1,184 1,184 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
362
+ 0.0 1,024 1 1,024.0 1,024.0 1,024 1,024 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
363
+ 0.0 1,024 1 1,024.0 1,024.0 1,024 1,024 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
364
+ 0.0 928 1 928.0 928.0 928 928 0.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
365
+
366
+ [7/8] Executing 'cuda_gpu_mem_time_sum' stats report
367
+
368
+ Time (%) Total Time (ns) Count Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Operation
369
+ -------- --------------- ----- ----------- ----------- --------- ----------- ----------- ------------------------------
370
+ 98.7 554,163,217 2,587 214,210.8 352.0 288 100,447,087 2,156,381.0 [CUDA memcpy Host-to-Device]
371
+ 1.0 5,437,594 4 1,359,398.5 1,359,767.0 1,356,454 1,361,606 2,174.7 [CUDA memcpy Device-to-Device]
372
+ 0.3 1,567,258 2,670 587.0 480.0 288 2,240 244.5 [CUDA memset]
373
+ 0.1 296,577 262 1,132.0 1,120.0 864 1,536 80.4 [CUDA memcpy Device-to-Host]
374
+
375
+ [8/8] Executing 'cuda_gpu_mem_size_sum' stats report
376
+
377
+ Total (MB) Count Avg (MB) Med (MB) Min (MB) Max (MB) StdDev (MB) Operation
378
+ ---------- ----- -------- -------- -------- -------- ----------- ------------------------------
379
+ 3,090.501 2,587 1.195 0.001 0.000 466.747 10.389 [CUDA memcpy Host-to-Device]
380
+ 2,489.319 4 622.330 622.330 622.330 622.330 0.000 [CUDA memcpy Device-to-Device]
381
+ 2.029 2,670 0.001 0.001 0.000 0.006 0.001 [CUDA memset]
382
+ 0.008 262 0.000 0.000 0.000 0.000 0.000 [CUDA memcpy Device-to-Host]
383
+
384
+ Generated:
385
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill.nsys-rep
386
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill.sqlite
sim_traverse_bs/traverse_bs_util_sim_prefill.nsys-rep ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd4985005949c5e5485a73c83659883635c2cdf0895cadd4deb604d1bc89a15b
3
+ size 17188445
sim_traverse_bs/traverse_bs_util_sim_prefill.py ADDED
@@ -0,0 +1,268 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import statistics
4
+ from typing import List, Tuple, Dict
5
+
6
+ import torch
7
+ import torch.cuda.nvtx as nvtx
8
+
9
+ from vllm import LLM, SamplingParams
10
+ from transformers import AutoTokenizer
11
+
12
+ # ========= 强制使用 vLLM V1 =========
13
+ os.environ.setdefault("VLLM_USE_V1", "1")
14
+ os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")
15
+
16
+ # 可选:打开 V1 metrics 统计
17
+ os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")
18
+
19
+ # ========= 试图导入 V1 metrics 类型(兼容不同版本)=========
20
+ try:
21
+ from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector # type: ignore
22
+ except Exception:
23
+ Counter = Gauge = Histogram = Vector = type("X", (), {}) # dummy
24
+
25
+ # ========= 配置 =========
26
+ MODEL_NAME = "Qwen/Qwen2-1.5B"
27
+ DTYPE = "bfloat16"
28
+ TP = 1
29
+ GPU_MEM_UTIL = 0.90
30
+ TRUST_REMOTE_CODE = True
31
+
32
+ # 场景:prefill=输入tokens,decode=输出tokens
33
+ SCENARIOS = [
34
+ {"name": "prefill640_decode1", "prompt_tokens": 1152, "max_new_tokens": 1},
35
+ # {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
36
+ # {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
37
+ ]
38
+
39
+ BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
40
+ # BATCH_SIZES = [1, 1024]
41
+
42
+ SEED = 1234
43
+ TEMPERATURE = 0.0
44
+ TOP_P = 1.0
45
+ WARMUP_PER_BS = 1 # 每个批次做一次预热
46
+
47
+ # ========= 构造“精确 token 数量”的 prompt =========
48
+ def build_exact_token_prompt(tokenizer, target_len: int) -> str:
49
+ if target_len <= 1:
50
+ # 最小化 prompt:用一个简单 token(避免空串导致0 token)
51
+ ids = tokenizer("A", add_special_tokens=False)["input_ids"]
52
+ if len(ids) >= 1:
53
+ return tokenizer.decode(ids[:1], skip_special_tokens=True, clean_up_tokenization_spaces=False)
54
+
55
+ base_text = (
56
+ "You are a helpful assistant. "
57
+ "Please analyze the following input and respond succinctly. "
58
+ )
59
+ chunk = " ".join(["data"] * 100) + ". "
60
+ text = base_text + chunk * 200 # 足够长的文本
61
+
62
+ lo, hi = 0, len(text)
63
+ target_ids = None
64
+ while lo <= hi:
65
+ mid = (lo + hi) // 2
66
+ ids = tokenizer(text[:mid], add_special_tokens=False)["input_ids"]
67
+ if len(ids) == target_len:
68
+ target_ids = ids
69
+ break
70
+ if len(ids) < target_len:
71
+ lo = mid + 1
72
+ else:
73
+ hi = mid - 1
74
+
75
+ if target_ids is None:
76
+ ids = tokenizer(text[:lo], add_special_tokens=False)["input_ids"]
77
+ if len(ids) > target_len:
78
+ target_ids = ids[:target_len]
79
+ else:
80
+ filler = " data"
81
+ while len(ids) < target_len:
82
+ ids = tokenizer(tokenizer.decode(ids) + filler, add_special_tokens=False)["input_ids"]
83
+ target_ids = ids[:target_len]
84
+
85
+ prompt = tokenizer.decode(target_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
86
+ # 断言精确长度
87
+ assert len(tokenizer(prompt, add_special_tokens=False)["input_ids"]) == target_len
88
+ return prompt
89
+
90
+ # ========= V1 metrics 抽取工具 =========
91
+ TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
92
+ TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds" # per-output-token latency
93
+
94
+ def _iter_children_of_vector(vec_obj):
95
+ for attr in ("children", "metrics", "series", "values", "samples", "items"):
96
+ if hasattr(vec_obj, attr):
97
+ val = getattr(vec_obj, attr)
98
+ if isinstance(val, dict):
99
+ for v in val.values():
100
+ yield v
101
+ else:
102
+ try:
103
+ for v in val:
104
+ yield v
105
+ except TypeError:
106
+ pass
107
+
108
+ def _collect_hist_sum_count(metrics, metric_name: str):
109
+ total_sum = 0.0
110
+ total_count = 0.0
111
+ for m in metrics:
112
+ mname = getattr(m, "name", None)
113
+ if mname != metric_name:
114
+ continue
115
+ # 直接 Histogram
116
+ if isinstance(m, Histogram) or m.__class__.__name__ == "Histogram":
117
+ total_sum += float(getattr(m, "sum", 0.0))
118
+ total_count += float(getattr(m, "count", 0.0))
119
+ continue
120
+ # Vector[Histogram]
121
+ if isinstance(m, Vector) or m.__class__.__name__ == "Vector":
122
+ for child in _iter_children_of_vector(m):
123
+ if isinstance(child, Histogram) or child.__class__.__name__ == "Histogram":
124
+ total_sum += float(getattr(child, "sum", 0.0))
125
+ total_count += float(getattr(child, "count", 0.0))
126
+ return total_sum, total_count
127
+
128
+ def _metrics_snapshot(llm) -> Dict[str, float]:
129
+ try:
130
+ mets = llm.get_metrics() # V1: 返回 Metric 列表(包含 Histogram/Vector 等)
131
+ except Exception:
132
+ return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
133
+ ttft_sum, ttft_cnt = _collect_hist_sum_count(mets, TTFT_METRIC_NAME)
134
+ tpot_sum, tpot_cnt = _collect_hist_sum_count(mets, TPOT_METRIC_NAME)
135
+ return {"ttft_sum": ttft_sum, "ttft_cnt": ttft_cnt, "tpot_sum": tpot_sum, "tpot_cnt": tpot_cnt}
136
+
137
+ def _metrics_delta(before: dict, after: dict):
138
+ return {
139
+ "ttft_sum": after["ttft_sum"] - before["ttft_sum"],
140
+ "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
141
+ "tpot_sum": after["tpot_sum"] - before["tpot_sum"],
142
+ "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
143
+ }
144
+
145
+ # ========= 带 NVTX 的 generate 包装 =========
146
+ def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
147
+ return llm.generate(prompts, params)
148
+
149
+ # ========= 统计格式化 =========
150
+ def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
151
+ xs = [v for v in x if (v == v)] # 过滤 NaN
152
+ if not xs:
153
+ return (float("nan"), float("nan"), float("nan"))
154
+ return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1]) # p90
155
+
156
+ def main():
157
+ print("--- vLLM V1 基准测试(含 NVTX 标记)---")
158
+ print(f"模型: {MODEL_NAME}")
159
+ print(f"批量大小: {BATCH_SIZES}")
160
+ print(f"场景: {[s['name'] for s in SCENARIOS]}")
161
+ print("-" * 60)
162
+
163
+ if not torch.cuda.is_available():
164
+ print("错误:需要 CUDA GPU。")
165
+ return
166
+
167
+ print("加载分词器/模型中...")
168
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)
169
+
170
+ # 用 NVTX 标记模型加载阶段
171
+ nvtx.range_push("LLM_init")
172
+ llm = LLM(
173
+ model=MODEL_NAME,
174
+ tensor_parallel_size=TP,
175
+ dtype=DTYPE,
176
+ trust_remote_code=TRUST_REMOTE_CODE,
177
+ gpu_memory_utilization=GPU_MEM_UTIL,
178
+ max_num_seqs=1024, # 足够覆盖本次扫描
179
+ max_model_len=4096,
180
+ disable_log_stats=False, # 开启 V1 metrics 收集
181
+ )
182
+ nvtx.range_pop()
183
+ print("模型加载完成。")
184
+
185
+ for sc in SCENARIOS:
186
+ name = sc["name"]
187
+ prompt_tokens = sc["prompt_tokens"]
188
+ max_new_tokens = sc["max_new_tokens"]
189
+
190
+ print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")
191
+
192
+ # 准备精确长度 prompt
193
+ prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)
194
+
195
+ # 采样参数(贪心)
196
+ sampling_params = SamplingParams(
197
+ max_tokens=max_new_tokens,
198
+ temperature=TEMPERATURE,
199
+ top_p=TOP_P,
200
+ seed=SEED,
201
+ n=1,
202
+ )
203
+
204
+ # 记录每个 bs 的结果(便于后续统计或外部解析)
205
+ for bs in BATCH_SIZES:
206
+ print(f"\n--- 批量大小 bs={bs} ---")
207
+
208
+ prompts = [prompt_text] * bs
209
+
210
+ # 预热
211
+ # print("预热中...")
212
+ # nvtx.range_push(f"WARMUP [{name}] bs={bs}")
213
+ # _ = decorated_generate(llm, [prompts[0]], sampling_params)
214
+ # torch.cuda.synchronize()
215
+ # nvtx.range_pop()
216
+ if bs== 1:
217
+ print("预热中...")
218
+ for _ in range(WARMUP_PER_BS):
219
+ _ = decorated_generate(llm, [prompts[0]], sampling_params)
220
+ torch.cuda.synchronize()
221
+
222
+ # 正式计时与 V1 metrics
223
+ # nvtx.range_push(f"RUN [{name}] bs={bs}")
224
+ torch.cuda.synchronize()
225
+ snap_before = _metrics_snapshot(llm)
226
+ t0 = time.perf_counter()
227
+
228
+ nvtx.range_push(f"generate [{name}] bs={bs}")
229
+ outputs = decorated_generate(llm, prompts, sampling_params)
230
+ nvtx.range_pop() # generate
231
+
232
+ torch.cuda.synchronize()
233
+ t1 = time.perf_counter()
234
+ snap_after = _metrics_snapshot(llm)
235
+ # nvtx.range_pop() # RUN
236
+
237
+ duration = t1 - t0
238
+
239
+ # 统计 token 与吞吐
240
+ total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
241
+ avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
242
+ throughput = total_output_tokens / duration if duration > 0 else float("inf")
243
+
244
+ # 解析 V1 TTFT / 解码吞吐
245
+ delta = _metrics_delta(snap_before, snap_after)
246
+ if delta["ttft_cnt"] > 0:
247
+ ttft = delta["ttft_sum"] / delta["ttft_cnt"]
248
+ else:
249
+ ttft = float("nan")
250
+
251
+ if delta["tpot_cnt"] > 0:
252
+ avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"] # seconds/token
253
+ decode_tps = 1.0 / avg_tpot
254
+ else:
255
+ decode_tps = float("nan")
256
+
257
+ print(f"执行时间: {duration:.4f} s")
258
+ print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
259
+ print(f"生成总 tokens: {total_output_tokens}")
260
+ print(f"吞吐(生成tokens/秒): {throughput:.2f}")
261
+ print(f"TTFT (V1 metrics): {ttft:.4f} s")
262
+ print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")
263
+
264
+ print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
265
+
266
+ if __name__ == "__main__":
267
+ print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
268
+ main()
sim_traverse_bs/traverse_bs_util_sim_prefill_1152.log ADDED
@@ -0,0 +1,381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ WARNING: CPU IP/backtrace sampling not supported, disabling.
2
+ Try the 'nsys status --environment' command to learn more.
3
+
4
+ WARNING: CPU context switch tracing not supported, disabling.
5
+ Try the 'nsys status --environment' command to learn more.
6
+
7
+ INFO 08-14 10:12:47 [__init__.py:235] Automatically detected platform cuda.
8
+ CUDA_VISIBLE_DEVICES = 3
9
+ --- vLLM V1 基准测试(含 NVTX 标记)---
10
+ 模型: Qwen/Qwen2-1.5B
11
+ 批量大小: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
12
+ 场景: ['prefill640_decode1']
13
+ ------------------------------------------------------------
14
+ 加载分词器/模型中...
15
+ INFO 08-14 10:12:58 [config.py:1604] Using max model len 4096
16
+ INFO 08-14 10:12:58 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
17
+ INFO 08-14 10:13:05 [__init__.py:235] Automatically detected platform cuda.
18
+ INFO 08-14 10:13:07 [core.py:572] Waiting for init message from front-end.
19
+ INFO 08-14 10:13:07 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
20
+ INFO 08-14 10:13:14 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
21
+ WARNING 08-14 10:13:14 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
22
+ INFO 08-14 10:13:14 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
23
+ INFO 08-14 10:13:15 [gpu_model_runner.py:1875] Loading model from scratch...
24
+ INFO 08-14 10:13:15 [cuda.py:290] Using Flash Attention backend on V1 engine.
25
+ INFO 08-14 10:13:15 [weight_utils.py:296] Using model weights format ['*.safetensors']
26
+ INFO 08-14 10:13:16 [weight_utils.py:349] No model.safetensors.index.json found in remote.
27
+
28
+
29
+
30
+
31
+ INFO 08-14 10:13:17 [default_loader.py:262] Loading weights took 0.66 seconds
32
+ INFO 08-14 10:13:17 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 1.885122 seconds
33
+ INFO 08-14 10:13:23 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
34
+ INFO 08-14 10:13:23 [backends.py:541] Dynamo bytecode transform time: 6.01 s
35
+ INFO 08-14 10:13:28 [backends.py:161] Directly load the compiled graph(s) for dynamic shape from the cache, took 4.662 s
36
+ INFO 08-14 10:13:29 [monitor.py:34] torch.compile takes 6.01 s in total
37
+ INFO 08-14 10:13:30 [gpu_worker.py:255] Available KV cache memory: 12.81 GiB
38
+ INFO 08-14 10:13:30 [kv_cache_utils.py:833] GPU KV cache size: 479,536 tokens
39
+ INFO 08-14 10:13:30 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.07x
40
+
41
+ INFO 08-14 10:13:33 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
42
+ INFO 08-14 10:13:33 [core.py:193] init engine (profile, create kv cache, warmup model) took 15.81 seconds
43
+ 模型加载完成。
44
+
45
+ ===== 场景:prefill640_decode1 | prefill=1152, decode=1 =====
46
+
47
+ --- 批量大小 bs=1 ---
48
+ 预热中...
49
+
50
+
51
+
52
+
53
+ 执行时间: 0.0159 s
54
+ 实际平均输入 tokens: 1152.00(目标 1152)
55
+ 生成总 tokens: 1
56
+ 吞吐(生成tokens/秒): 62.80
57
+ TTFT (V1 metrics): 0.0147 s
58
+ 解码吞吐 (V1 metrics): nan tok/s
59
+
60
+ --- 批量大小 bs=2 ---
61
+
62
+
63
+ 执行时间: 0.0213 s
64
+ 实际平均输入 tokens: 1152.00(目标 1152)
65
+ 生成总 tokens: 2
66
+ 吞吐(生成tokens/秒): 94.11
67
+ TTFT (V1 metrics): 0.0155 s
68
+ 解码吞吐 (V1 metrics): nan tok/s
69
+
70
+ --- 批量大小 bs=4 ---
71
+
72
+
73
+ 执行时间: 0.0305 s
74
+ 实际平均输入 tokens: 1152.00(目标 1152)
75
+ 生成总 tokens: 4
76
+ 吞吐(生成tokens/秒): 131.32
77
+ TTFT (V1 metrics): 0.0172 s
78
+ 解码吞吐 (V1 metrics): nan tok/s
79
+
80
+ --- 批量大小 bs=8 ---
81
+
82
+
83
+ 执行时间: 0.0400 s
84
+ 实际平均输入 tokens: 1152.00(目标 1152)
85
+ 生成总 tokens: 8
86
+ 吞吐(生成tokens/秒): 199.98
87
+ TTFT (V1 metrics): 0.0204 s
88
+ 解码吞吐 (V1 metrics): nan tok/s
89
+
90
+ --- 批量大小 bs=16 ---
91
+
92
+
93
+ 执行时间: 0.0617 s
94
+ 实际平均输入 tokens: 1152.00(目标 1152)
95
+ 生成总 tokens: 16
96
+ 吞吐(生成tokens/秒): 259.19
97
+ TTFT (V1 metrics): 0.0285 s
98
+ 解码吞吐 (V1 metrics): nan tok/s
99
+
100
+ --- 批量大小 bs=32 ---
101
+
102
+
103
+ 执行时间: 0.1109 s
104
+ 实际平均输入 tokens: 1152.00(目标 1152)
105
+ 生成总 tokens: 32
106
+ 吞吐(生成tokens/秒): 288.55
107
+ TTFT (V1 metrics): 0.0514 s
108
+ 解码吞吐 (V1 metrics): nan tok/s
109
+
110
+ --- 批量大小 bs=64 ---
111
+
112
+
113
+ 执行时间: 0.2001 s
114
+ 实际平均输入 tokens: 1152.00(目标 1152)
115
+ 生成总 tokens: 64
116
+ 吞吐(生成tokens/秒): 319.77
117
+ TTFT (V1 metrics): 0.0997 s
118
+ 解码吞吐 (V1 metrics): nan tok/s
119
+
120
+ --- 批量大小 bs=128 ---
121
+
122
+
123
+ 执行时间: 0.3899 s
124
+ 实际平均输入 tokens: 1152.00(目标 1152)
125
+ 生成总 tokens: 128
126
+ 吞吐(生成tokens/秒): 328.30
127
+ TTFT (V1 metrics): 0.1963 s
128
+ 解码吞吐 (V1 metrics): nan tok/s
129
+
130
+ --- 批量大小 bs=256 ---
131
+
132
+
133
+ 执行时间: 1.0404 s
134
+ 实际平均输入 tokens: 1152.00(目标 1152)
135
+ 生成总 tokens: 256
136
+ 吞吐(生成tokens/秒): 246.06
137
+ TTFT (V1 metrics): 0.6454 s
138
+ 解码吞吐 (V1 metrics): nan tok/s
139
+
140
+ --- 批量大小 bs=512 ---
141
+
142
+
143
+ 执行时间: 1.4178 s
144
+ 实际平均输入 tokens: 1152.00(目标 1152)
145
+ 生成总 tokens: 512
146
+ 吞吐(生成tokens/秒): 361.12
147
+ TTFT (V1 metrics): 0.6722 s
148
+ 解码吞吐 (V1 metrics): nan tok/s
149
+
150
+ --- 批量大小 bs=1024 ---
151
+
152
+
153
+ [rank0]:[W814 10:13:41.589921824 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
154
+ 执行时间: 2.4235 s
155
+ 实际平均输入 tokens: 1152.00(目标 1152)
156
+ 生成总 tokens: 1024
157
+ 吞吐(生成tokens/秒): 422.54
158
+ TTFT (V1 metrics): 1.2129 s
159
+ 解码吞吐 (V1 metrics): nan tok/s
160
+
161
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
162
+ GPU 3: General Metrics for NVIDIA AD10x (any frequency)
163
+ Generating '/tmp/nsys-report-18f3.qdstrm'
164
+
165
+
166
+ [3/8] Executing 'nvtx_sum' stats report
167
+
168
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Style Range
169
+ -------- --------------- --------- ---------------- ---------------- -------------- -------------- ----------- ------- --------------------------------------
170
+ 88.5 44,070,406,690 1 44,070,406,690.0 44,070,406,690.0 44,070,406,690 44,070,406,690 0.0 PushPop :LLM_init
171
+ 4.9 2,423,366,261 1 2,423,366,261.0 2,423,366,261.0 2,423,366,261 2,423,366,261 0.0 PushPop :generate [prefill640_decode1] bs=1024
172
+ 2.8 1,417,657,581 1 1,417,657,581.0 1,417,657,581.0 1,417,657,581 1,417,657,581 0.0 PushPop :generate [prefill640_decode1] bs=512
173
+ 2.1 1,040,325,509 1 1,040,325,509.0 1,040,325,509.0 1,040,325,509 1,040,325,509 0.0 PushPop :generate [prefill640_decode1] bs=256
174
+ 0.8 389,808,438 1 389,808,438.0 389,808,438.0 389,808,438 389,808,438 0.0 PushPop :generate [prefill640_decode1] bs=128
175
+ 0.4 200,051,514 1 200,051,514.0 200,051,514.0 200,051,514 200,051,514 0.0 PushPop :generate [prefill640_decode1] bs=64
176
+ 0.2 110,723,882 1 110,723,882.0 110,723,882.0 110,723,882 110,723,882 0.0 PushPop :generate [prefill640_decode1] bs=32
177
+ 0.1 61,663,721 1 61,663,721.0 61,663,721.0 61,663,721 61,663,721 0.0 PushPop :generate [prefill640_decode1] bs=16
178
+ 0.1 39,934,013 1 39,934,013.0 39,934,013.0 39,934,013 39,934,013 0.0 PushPop :generate [prefill640_decode1] bs=8
179
+ 0.1 30,382,401 1 30,382,401.0 30,382,401.0 30,382,401 30,382,401 0.0 PushPop :generate [prefill640_decode1] bs=4
180
+ 0.0 21,185,162 1 21,185,162.0 21,185,162.0 21,185,162 21,185,162 0.0 PushPop :generate [prefill640_decode1] bs=2
181
+ 0.0 15,818,426 1 15,818,426.0 15,818,426.0 15,818,426 15,818,426 0.0 PushPop :generate [prefill640_decode1] bs=1
182
+ 0.0 104,750 2 52,375.0 52,375.0 47,675 57,075 6,646.8 PushPop CCCL:cub::DeviceSegmentedRadixSort
183
+
184
+ [4/8] Executing 'osrt_sum' stats report
185
+
186
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
187
+ -------- --------------- --------- ---------------- ------------ --------- -------------- ---------------- ----------------------
188
+ 30.5 361,093,251,838 18,099 19,951,005.7 22,399.0 1,000 26,543,007,832 444,078,095.4 pthread_cond_timedwait
189
+ 27.4 324,888,102,277 20,985 15,481,920.5 10,061,722.0 1,004 34,595,644,343 416,535,555.9 epoll_wait
190
+ 22.4 265,117,572,216 21 12,624,646,296.0 9,618,795.0 18,952 26,544,576,053 13,564,859,509.6 pthread_cond_wait
191
+ 8.7 103,158,712,529 12,027 8,577,260.5 1,332.0 1,000 10,010,048,294 168,486,764.8 poll
192
+ 6.3 74,091,760,041 28 2,646,134,287.2 172,477.0 77,298 10,000,094,040 4,119,108,342.9 sem_timedwait
193
+ 4.0 46,893,474,571 32,679 1,434,972.8 3,126.0 1,000 24,724,048,183 164,795,577.6 read
194
+ 0.6 6,727,161,428 745 9,029,746.9 6,344,952.0 26,533 529,046,356 30,627,414.0 sem_wait
195
+ 0.0 480,836,474 193,689 2,482.5 1,588.0 1,001 75,149,274 170,780.2 munmap
196
+ 0.0 328,343,219 8,501 38,624.1 10,820.0 1,002 30,644,743 467,164.9 ioctl
197
+ 0.0 251,356,732 370 679,342.5 2,868.5 1,122 19,761,752 3,411,160.1 fopen
198
+ 0.0 121,530,743 24 5,063,781.0 5,062,739.5 5,054,831 5,078,176 7,766.8 nanosleep
199
+ 0.0 102,831,793 30,682 3,351.5 2,838.0 1,021 121,668 2,147.4 open64
200
+ 0.0 88,351,007 16,056 5,502.7 2,407.0 1,000 15,130,665 165,401.9 mmap64
201
+ 0.0 82,243,111 49 1,678,430.8 4,835.0 1,004 75,416,346 10,759,062.2 waitpid
202
+ 0.0 72,255,858 96 752,665.2 3,804.0 1,029 19,707,249 3,616,782.1 open
203
+ 0.0 68,970,368 38 1,815,009.7 659,953.5 2,972 10,358,269 3,351,149.7 pthread_join
204
+ 0.0 56,563,497 10 5,656,349.7 22,802.5 10,900 56,234,234 17,771,314.4 connect
205
+ 0.0 30,201,734 2,482 12,168.3 8,512.0 1,026 424,091 16,296.8 pthread_mutex_lock
206
+ 0.0 29,341,923 15,018 1,953.8 1,793.0 1,000 71,749 1,783.3 pthread_cond_signal
207
+ 0.0 22,096,068 2,803 7,883.0 4,886.0 1,024 3,311,062 64,777.7 recv
208
+ 0.0 17,172,528 2,802 6,128.7 5,160.0 1,857 89,996 6,209.7 send
209
+ 0.0 16,852,837 5,836 2,887.7 2,233.0 1,006 79,284 4,761.4 write
210
+ 0.0 9,855,547 147 67,044.5 68,806.0 55,639 86,945 4,938.1 sleep
211
+ 0.0 8,928,549 131 68,156.9 65,190.0 19,928 172,451 29,775.9 pthread_create
212
+ 0.0 7,702,498 910 8,464.3 5,459.5 1,005 92,618 10,147.4 fgets
213
+ 0.0 4,416,671 3,034 1,455.7 1,368.0 1,000 11,404 539.8 epoll_ctl
214
+ 0.0 2,829,014 20 141,450.7 186,919.5 9,591 225,750 82,026.0 pthread_rwlock_wrlock
215
+ 0.0 2,644,265 344 7,686.8 7,272.0 1,996 38,686 2,849.6 fopen64
216
+ 0.0 1,848,847 66 28,012.8 5,390.0 1,156 480,098 75,714.8 futex
217
+ 0.0 1,696,227 1,286 1,319.0 1,127.0 1,000 9,841 674.0 fclose
218
+ 0.0 1,083,206 197 5,498.5 3,606.0 1,087 74,701 8,142.4 mmap
219
+ 0.0 1,076,556 10 107,655.6 127,469.5 19,259 199,656 71,478.9 pthread_rwlock_rdlock
220
+ 0.0 806,160 1 806,160.0 806,160.0 806,160 806,160 0.0 fork
221
+ 0.0 388,292 65 5,973.7 5,152.0 2,419 14,760 3,003.5 pipe2
222
+ 0.0 314,102 41 7,661.0 7,552.0 1,994 18,407 4,501.9 socket
223
+ 0.0 240,921 24 10,038.4 3,584.5 1,030 69,128 17,724.3 bind
224
+ 0.0 144,657 34 4,254.6 3,011.5 1,351 16,000 2,941.9 pthread_cond_broadcast
225
+ 0.0 86,266 7 12,323.7 13,530.0 3,460 23,269 7,320.6 fread
226
+ 0.0 60,319 5 12,063.8 8,575.0 3,768 23,707 7,966.6 accept4
227
+ 0.0 56,413 37 1,524.7 1,289.0 1,013 4,207 757.9 fcntl
228
+ 0.0 46,861 15 3,124.1 2,439.0 1,515 7,179 1,660.3 stat
229
+ 0.0 44,352 25 1,774.1 1,871.0 1,125 2,466 387.1 sigaction
230
+ 0.0 33,908 16 2,119.3 2,269.0 1,010 3,538 822.4 dup2
231
+ 0.0 26,126 10 2,612.6 2,300.0 1,008 7,345 1,792.1 pread
232
+ 0.0 24,947 8 3,118.4 2,937.5 1,040 5,596 2,129.1 fflush
233
+ 0.0 23,097 5 4,619.4 4,103.0 2,090 8,551 2,725.9 fwrite
234
+ 0.0 22,454 4 5,613.5 5,204.5 3,900 8,145 1,805.3 lstat
235
+ 0.0 21,962 4 5,490.5 5,141.0 3,524 8,156 1,995.5 flock
236
+ 0.0 20,689 11 1,880.8 1,816.0 1,426 2,769 414.2 listen
237
+ 0.0 13,565 3 4,521.7 4,506.0 4,310 4,749 219.9 fputs_unlocked
238
+ 0.0 11,536 5 2,307.2 2,273.0 1,638 3,118 528.8 mprotect
239
+ 0.0 8,918 5 1,783.6 1,795.0 1,003 3,233 906.7 fstat
240
+ 0.0 6,379 1 6,379.0 6,379.0 6,379 6,379 0.0 kill
241
+ 0.0 5,888 3 1,962.7 1,980.0 1,768 2,140 186.6 flockfile
242
+ 0.0 3,752 1 3,752.0 3,752.0 3,752 3,752 0.0 fputs
243
+ 0.0 3,389 2 1,694.5 1,694.5 1,296 2,093 563.6 openat64
244
+
245
+ [5/8] Executing 'cuda_api_sum' stats report
246
+
247
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
248
+ -------- --------------- --------- ----------- ----------- -------- ----------- ----------- ------------------------------------------
249
+ 43.7 1,486,144,372 5,546 267,966.9 5,405.5 3,421 119,285,070 1,741,913.5 cudaMemcpyAsync
250
+ 20.3 691,134,943 58,898 11,734.4 6,501.0 881 65,983,554 383,241.3 cudaLaunchKernel
251
+ 8.2 277,499,678 20,155 13,768.3 13,012.0 9,102 240,985 4,245.9 cudaGraphLaunch_v10000
252
+ 6.9 235,333,202 1,943 121,118.5 77,523.0 42,429 1,573,158 204,447.3 cudaGraphInstantiateWithFlags_v11040
253
+ 5.5 187,977,096 2,136 88,004.3 30,687.0 4,644 122,466,605 2,649,205.6 cudaDeviceSynchronize
254
+ 5.5 186,726,680 1,366 136,696.0 3,126.5 2,622 143,581,903 3,899,107.7 cudaStreamSynchronize
255
+ 3.4 114,031,743 33,151 3,439.8 4,167.0 689 147,601 2,360.2 cuLaunchKernel
256
+ 1.6 55,915,195 222 251,870.2 119,564.5 59,545 2,410,123 388,773.3 cudaFree
257
+ 1.2 41,787,784 348 120,079.8 112,828.0 8,334 1,053,627 57,682.4 cudaMalloc
258
+ 0.8 28,012,374 10 2,801,237.4 2,938,591.5 56,912 5,017,452 1,646,078.4 cuLibraryLoadData
259
+ 0.6 19,603,564 6,016 3,258.6 3,252.5 182 206,497 3,546.5 cudaMemsetAsync
260
+ 0.5 18,001,538 22,456 801.6 744.0 325 15,163 252.0 cudaStreamIsCapturing_v10000
261
+ 0.4 13,493,455 169 79,842.9 82,039.0 26,598 421,517 46,195.9 cuModuleLoadData
262
+ 0.3 10,862,324 9,998 1,086.4 417.0 271 4,538,659 46,724.1 cuKernelGetFunction
263
+ 0.3 9,786,234 18,895 517.9 489.0 307 7,453 151.6 cudaStreamGetCaptureInfo_v2_v11030
264
+ 0.2 8,477,629 1,943 4,363.2 4,336.0 3,372 11,361 550.5 cudaStreamBeginCapture_v10000
265
+ 0.2 7,848,956 1,943 4,039.6 3,988.0 2,469 11,052 584.9 cudaGraphDestroy_v10000
266
+ 0.1 3,305,186 128 25,821.8 2,208.5 1,414 1,124,514 138,112.8 cudaStreamCreateWithPriority
267
+ 0.1 2,707,973 1,943 1,393.7 1,378.0 1,055 7,779 187.1 cudaStreamEndCapture_v10000
268
+ 0.0 1,688,826 14 120,630.4 5,489.5 3,618 1,615,171 430,163.0 cudaHostAlloc
269
+ 0.0 1,671,277 1,943 860.2 794.0 637 3,051 250.4 cudaGraphGetNodes_v10000
270
+ 0.0 242,364 8 30,295.5 30,087.5 10,007 68,277 20,230.1 cudaMemGetInfo
271
+ 0.0 135,593 810 167.4 142.0 81 1,741 108.0 cuGetProcAddress_v2
272
+ 0.0 14,928 19 785.7 434.0 292 4,526 1,066.1 cudaEventCreateWithFlags
273
+ 0.0 14,127 15 941.8 817.0 490 1,683 386.4 cuLibraryGetKernel
274
+ 0.0 9,003 14 643.1 654.0 387 1,028 172.1 cudaThreadExchangeStreamCaptureMode_v10010
275
+ 0.0 6,456 1 6,456.0 6,456.0 6,456 6,456 0.0 cudaEventRecord
276
+ 0.0 4,689 3 1,563.0 1,239.0 1,181 2,269 612.1 cuInit
277
+ 0.0 4,627 4 1,156.8 965.5 190 2,506 1,151.2 cuModuleGetLoadingMode
278
+ 0.0 4,166 1 4,166.0 4,166.0 4,166 4,166 0.0 cudaStreamWaitEvent
279
+ 0.0 1,492 1 1,492.0 1,492.0 1,492 1,492 0.0 cudaEventDestroy
280
+ 0.0 923 2 461.5 461.5 352 571 154.9 cudaGetDriverEntryPoint_v11030
281
+
282
+ [6/8] Executing 'cuda_gpu_kern_sum' stats report
283
+
284
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
285
+ -------- --------------- --------- ----------- ----------- --------- --------- ----------- ----------------------------------------------------------------------------------------------------
286
+ 39.3 732,498,877 19,488 37,587.2 37,313.0 36,128 65,280 1,363.6 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
287
+ 18.9 351,468,468 1,122 313,251.8 497,570.0 10,560 505,058 229,889.0 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
288
+ 10.2 191,063,614 3,836 49,808.0 22,016.0 7,808 132,256 39,115.9 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
289
+ 8.5 157,619,408 924 170,583.8 162,464.5 40,128 1,414,887 225,663.1 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
290
+ 4.2 78,494,924 28 2,803,390.1 2,805,357.0 2,788,621 2,809,197 5,574.6 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
291
+ 2.2 40,692,415 140 290,660.1 254,817.0 186,816 423,458 91,368.5 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
292
+ 2.1 39,174,391 8 4,896,798.9 4,852,759.0 4,797,142 5,086,872 109,448.4 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
293
+ 1.6 29,472,378 19,488 1,512.3 1,568.0 1,055 3,232 143.0 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
294
+ 1.2 22,871,420 1,960 11,669.1 4,447.5 1,055 461,155 54,297.5 triton_poi_fused_mul_silu_1
295
+ 1.2 22,661,811 784 28,905.4 12,512.0 11,712 62,496 20,606.8 ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
296
+ 1.1 20,375,102 4 5,093,775.5 5,084,087.5 4,911,190 5,295,737 198,253.9 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
297
+ 0.8 14,291,999 28 510,428.5 511,906.5 469,986 513,666 7,950.6 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
298
+ 0.8 14,252,929 170 83,840.8 51,520.0 44,417 3,040,014 307,906.7 ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
299
+ 0.7 12,842,687 106 121,157.4 8,912.0 6,976 498,914 190,206.3 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
300
+ 0.6 10,367,020 1,960 5,289.3 3,488.0 1,536 111,617 12,601.5 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
301
+ 0.5 9,729,741 4 2,432,435.3 2,432,139.0 2,391,627 2,473,836 44,136.5 void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
302
+ 0.5 9,150,918 28 326,818.5 326,993.5 324,545 330,945 1,225.0 ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
303
+ 0.5 8,477,259 224 37,844.9 37,792.5 36,449 39,201 492.7 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
304
+ 0.4 7,797,860 2 3,898,930.0 3,898,930.0 3,708,016 4,089,844 269,993.2 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
305
+ 0.4 7,227,678 336 21,510.9 21,424.0 21,088 22,752 352.3 ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
306
+ 0.4 6,877,224 700 9,824.6 5,776.0 5,408 712,515 53,225.4 void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
307
+ 0.3 6,458,033 1,960 3,294.9 2,048.0 1,536 79,136 9,038.3 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
308
+ 0.3 6,187,093 476 12,998.1 12,896.0 12,032 14,464 534.8 ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
309
+ 0.3 5,894,651 4 1,473,662.8 1,473,527.0 1,472,615 1,474,982 1,009.9 void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
310
+ 0.2 4,390,517 140 31,360.8 27,248.5 26,080 49,569 8,300.7 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
311
+ 0.2 4,165,038 698 5,967.1 3,088.0 2,080 1,005,157 53,500.9 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
312
+ 0.2 4,133,648 1,890 2,187.1 1,824.0 1,343 22,113 2,250.2 triton_poi_fused_cat_3
313
+ 0.2 3,999,603 2 1,999,801.5 1,999,801.5 1,998,857 2,000,746 1,335.7 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
314
+ 0.2 3,826,146 4,587 834.1 800.0 767 1,312 75.4 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
315
+ 0.2 3,594,124 56 64,180.8 64,192.5 63,264 65,537 457.5 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
316
+ 0.2 3,435,441 4 858,860.3 858,356.0 855,844 862,885 3,126.9 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
317
+ 0.2 3,196,367 2 1,598,183.5 1,598,183.5 1,574,055 1,622,312 34,122.9 void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
318
+ 0.2 2,907,709 1,512 1,923.1 1,856.0 1,312 2,976 434.3 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
319
+ 0.1 2,738,586 1,890 1,449.0 1,152.0 863 17,280 1,803.1 triton_poi_fused_view_5
320
+ 0.1 2,670,047 1,890 1,412.7 1,375.0 1,216 6,112 532.9 triton_poi_fused_cat_4
321
+ 0.1 2,665,755 832 3,204.0 3,136.0 2,880 7,488 322.6 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
322
+ 0.1 2,582,188 2 1,291,094.0 1,291,094.0 1,290,950 1,291,238 203.6 at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
323
+ 0.1 2,578,221 2 1,289,110.5 1,289,110.5 1,287,431 1,290,790 2,375.2 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
324
+ 0.1 2,425,933 112 21,660.1 21,632.0 9,535 34,625 12,038.2 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
325
+ 0.1 1,374,214 2 687,107.0 687,107.0 679,619 694,595 10,589.6 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
326
+ 0.1 1,150,824 28 41,100.9 41,056.0 40,161 43,264 550.5 ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
327
+ 0.1 1,135,783 696 1,631.9 1,632.0 1,344 1,856 53.5 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
328
+ 0.1 957,187 28 34,185.3 34,752.5 17,888 35,328 3,202.1 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
329
+ 0.0 868,532 698 1,244.3 1,248.0 1,056 1,728 62.7 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
330
+ 0.0 633,512 696 910.2 896.0 895 1,024 24.6 void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
331
+ 0.0 614,815 696 883.4 865.0 863 992 24.3 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
332
+ 0.0 456,669 515 886.7 896.0 863 1,855 47.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
333
+ 0.0 296,773 168 1,766.5 1,760.0 1,536 2,016 122.4 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
334
+ 0.0 260,900 70 3,727.1 3,072.0 1,984 36,449 4,037.6 triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
335
+ 0.0 192,163 70 2,745.2 2,144.0 1,663 39,552 4,482.5 triton_poi_fused_cat_1
336
+ 0.0 167,425 185 905.0 896.0 863 992 31.8 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
337
+ 0.0 156,225 1 156,225.0 156,225.0 156,225 156,225 0.0 void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
338
+ 0.0 102,214 70 1,460.2 1,360.0 1,216 9,152 936.0 triton_poi_fused_cat_2
339
+ 0.0 100,832 70 1,440.5 1,199.5 864 13,984 1,559.1 triton_poi_fused_view_3
340
+ 0.0 79,488 1 79,488.0 79,488.0 79,488 79,488 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
341
+ 0.0 64,127 58 1,105.6 896.0 864 11,392 1,376.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
342
+ 0.0 44,032 1 44,032.0 44,032.0 44,032 44,032 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
343
+ 0.0 36,640 28 1,308.6 1,312.0 1,280 1,375 18.0 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
344
+ 0.0 26,464 1 26,464.0 26,464.0 26,464 26,464 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
345
+ 0.0 19,520 1 19,520.0 19,520.0 19,520 19,520 0.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
346
+ 0.0 11,425 11 1,038.6 864.0 800 1,504 271.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
347
+ 0.0 10,720 2 5,360.0 5,360.0 5,280 5,440 113.1 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
348
+ 0.0 8,736 2 4,368.0 4,368.0 4,224 4,512 203.6 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
349
+ 0.0 3,616 2 1,808.0 1,808.0 1,600 2,016 294.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
350
+ 0.0 3,456 2 1,728.0 1,728.0 1,696 1,760 45.3 void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
351
+ 0.0 3,200 2 1,600.0 1,600.0 1,504 1,696 135.8 void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
352
+ 0.0 3,008 2 1,504.0 1,504.0 1,344 1,664 226.3 void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
353
+ 0.0 2,977 2 1,488.5 1,488.5 993 1,984 700.7 void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
354
+ 0.0 2,912 2 1,456.0 1,456.0 1,344 1,568 158.4 void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
355
+ 0.0 2,304 1 2,304.0 2,304.0 2,304 2,304 0.0 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
356
+ 0.0 1,185 1 1,185.0 1,185.0 1,185 1,185 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
357
+ 0.0 1,024 1 1,024.0 1,024.0 1,024 1,024 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
358
+ 0.0 1,024 1 1,024.0 1,024.0 1,024 1,024 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
359
+ 0.0 896 1 896.0 896.0 896 896 0.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
360
+
361
+ [7/8] Executing 'cuda_gpu_mem_time_sum' stats report
362
+
363
+ Time (%) Total Time (ns) Count Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Operation
364
+ -------- --------------- ----- ----------- ----------- --------- ----------- ----------- ------------------------------
365
+ 98.5 509,288,023 4,846 105,094.5 352.0 288 118,727,568 1,792,036.2 [CUDA memcpy Host-to-Device]
366
+ 1.1 5,441,785 4 1,360,446.3 1,361,446.0 1,356,198 1,362,695 2,892.8 [CUDA memcpy Device-to-Device]
367
+ 0.3 1,631,584 3,104 525.6 384.0 289 2,016 236.9 [CUDA memset]
368
+ 0.2 794,753 696 1,141.9 1,120.0 864 1,664 71.6 [CUDA memcpy Device-to-Host]
369
+
370
+ [8/8] Executing 'cuda_gpu_mem_size_sum' stats report
371
+
372
+ Total (MB) Count Avg (MB) Med (MB) Min (MB) Max (MB) StdDev (MB) Operation
373
+ ---------- ----- -------- -------- -------- -------- ----------- ------------------------------
374
+ 3,090.507 4,846 0.638 0.000 0.000 466.747 7.613 [CUDA memcpy Host-to-Device]
375
+ 2,489.319 4 622.330 622.330 622.330 622.330 0.000 [CUDA memcpy Device-to-Device]
376
+ 2.068 3,104 0.001 0.001 0.000 0.006 0.001 [CUDA memset]
377
+ 0.008 696 0.000 0.000 0.000 0.000 0.000 [CUDA memcpy Device-to-Host]
378
+
379
+ Generated:
380
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill_1152.nsys-rep
381
+ /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill_1152.sqlite
sim_traverse_bs/traverse_bs_util_sim_prefill_1152.nsys-rep ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf9e96f6acddf12e5bda76a4f88fa873ab34c046eca72b3ca35e23ba788c9e25
3
+ size 21975663
std_traverse_bs/terminal.log ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 模型加载完成。
2
+
3
+ ===== 场景:prefill640_decode512 | prefill=640, decode=512 =====
4
+
5
+ --- 批量大小 bs=1 ---
6
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 431.65it/s]
7
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████████| 1/1 [00:02<00:00, 2.57s/it, est. speed input: 249.31 toks/s, output: 199.44 toks/s]
8
+ 执行时间: 2.5731 s
9
+ 实际平均输入 tokens: 640.00(目标 640)
10
+ 生成总 tokens: 512
11
+ 吞吐(生成tokens/秒): 198.98
12
+ TTFT (V1 metrics): 0.0238 s
13
+ 解码吞吐 (V1 metrics): 200.69 tok/s
14
+
15
+ --- 批量大小 bs=2 ---
16
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 202.04it/s]
17
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████████| 2/2 [00:02<00:00, 1.36s/it, est. speed input: 470.56 toks/s, output: 376.45 toks/s]
18
+ 执行时间: 2.7325 s
19
+ 实际平均输入 tokens: 640.00(目标 640)
20
+ 生成总 tokens: 1024
21
+ 吞吐(生成tokens/秒): 374.75
22
+ TTFT (V1 metrics): 0.0129 s
23
+ 解码吞吐 (V1 metrics): 188.36 tok/s
24
+
25
+ --- 批量大小 bs=4 ---
26
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 210.51it/s]
27
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████████| 4/4 [00:03<00:00, 1.33it/s, est. speed input: 852.39 toks/s, output: 681.91 toks/s]
28
+ 执行时间: 3.0248 s
29
+ 实际平均输入 tokens: 640.00(目标 640)
30
+ 生成总 tokens: 2048
31
+ 吞吐(生成tokens/秒): 677.07
32
+ TTFT (V1 metrics): 0.0164 s
33
+ 解码吞吐 (V1 metrics): 170.68 tok/s
34
+
35
+ --- 批量大小 bs=8 ---
36
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:00<00:00, 229.89it/s]
37
+ Processed prompts: 100%|█████████████████████████████████████████████████████████████████| 8/8 [00:03<00:00, 2.59it/s, est. speed input: 1658.80 toks/s, output: 1327.03 toks/s]
38
+ 执行时间: 3.1236 s
39
+ 实际平均输入 tokens: 640.00(目标 640)
40
+ 生成总 tokens: 4096
41
+ 吞吐(生成tokens/秒): 1311.31
42
+ TTFT (V1 metrics): 0.0219 s
43
+ 解码吞吐 (V1 metrics): 165.93 tok/s
44
+
45
+ --- 批量大小 bs=16 ---
46
+ Adding requests: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 16/16 [00:00<00:00, 259.80it/s]
47
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████| 16/16 [00:03<00:00, 5.28it/s, est. speed input: 3376.77 toks/s, output: 2701.40 toks/s]
48
+ 执行时间: 3.0966 s
49
+ 实际平均输入 tokens: 640.00(目标 640)
50
+ 生成总 tokens: 8192
51
+ 吞吐(生成tokens/秒): 2645.49
52
+ TTFT (V1 metrics): 0.0314 s
53
+ 解码吞吐 (V1 metrics): 168.86 tok/s
54
+
55
+ --- 批量大小 bs=32 ---
56
+ Adding requests: 100%|████████████████████████████████████���█████████████████████████████████████████████████████████████████████████████████████| 32/32 [00:00<00:00, 335.64it/s]
57
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████| 32/32 [00:03<00:00, 9.64it/s, est. speed input: 6170.77 toks/s, output: 4936.59 toks/s]
58
+ 执行时间: 3.4167 s
59
+ 实际平均输入 tokens: 640.00(目标 640)
60
+ 生成总 tokens: 16384
61
+ 吞吐(生成tokens/秒): 4795.34
62
+ TTFT (V1 metrics): 0.0426 s
63
+ 解码吞吐 (V1 metrics): 154.14 tok/s
64
+
65
+ --- 批量大小 bs=64 ---
66
+ Adding requests: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 64/64 [00:00<00:00, 449.34it/s]
67
+ Processed prompts: 100%|██████████████████████████████████████████████████████████████| 64/64 [00:03<00:00, 16.67it/s, est. speed input: 10672.90 toks/s, output: 8538.28 toks/s]
68
+ 执行时间: 3.9839 s
69
+ 实际平均输入 tokens: 640.00(目标 640)
70
+ 生成总 tokens: 32768
71
+ 吞吐(生成tokens/秒): 8225.04
72
+ TTFT (V1 metrics): 0.0638 s
73
+ 解码吞吐 (V1 metrics): 133.10 tok/s
74
+
75
+ --- 批量大小 bs=128 ---
76
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 128/128 [00:00<00:00, 489.69it/s]
77
+ Processed prompts: 100%|███████████████████████████████████████████████████████████| 128/128 [00:04<00:00, 26.33it/s, est. speed input: 16849.84 toks/s, output: 13425.52 toks/s]
78
+ 执行时间: 5.1276 s
79
+ 实际平均输入 tokens: 640.00(目标 640)
80
+ 生成总 tokens: 65272
81
+ 吞吐(生成tokens/秒): 12729.46
82
+ TTFT (V1 metrics): 0.1208 s
83
+ 解码吞吐 (V1 metrics): 104.39 tok/s
84
+
85
+ --- 批量大小 bs=256 ---
86
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 256/256 [00:00<00:00, 351.03it/s]
87
+ Processed prompts: 100%|███████████████████████████████████████████████████████████| 256/256 [00:07<00:00, 35.75it/s, est. speed input: 22881.00 toks/s, output: 18304.75 toks/s]
88
+ 执行时间: 7.8977 s
89
+ 实际平均输入 tokens: 640.00(目标 640)
90
+ 生成总 tokens: 131072
91
+ 吞吐(生成tokens/秒): 16596.30
92
+ TTFT (V1 metrics): 0.4649 s
93
+ 解码吞吐 (V1 metrics): 69.79 tok/s
94
+
95
+ --- 批量大小 bs=512 ---
96
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 512/512 [00:00<00:00, 535.56it/s]
97
+ Processed prompts: 100%|███████████████████████████████████████████████████████████| 512/512 [00:12<00:00, 41.03it/s, est. speed input: 26258.02 toks/s, output: 21006.35 toks/s]
98
+ 执行时间: 13.4567 s
99
+ 实际平均输入 tokens: 640.00(目标 640)
100
+ 生成总 tokens: 262144
101
+ 吞吐(生成tokens/秒): 19480.62
102
+ TTFT (V1 metrics): 0.4882 s
103
+ 解码吞吐 (V1 metrics): 40.60 tok/s
104
+
105
+ --- 批量大小 bs=1024 ---
106
+ Adding requests: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1024/1024 [00:01<00:00, 553.17it/s]
107
+ Processed prompts: 100%|████████��████████████████████████████████████████████████| 1024/1024 [00:24<00:00, 41.44it/s, est. speed input: 26524.06 toks/s, output: 21219.22 toks/s]
108
+ 执行时间: 26.5997 s
109
+ 实际平均输入 tokens: 640.00(目标 640)
110
+ 生成总 tokens: 524288
111
+ 吞吐(生成tokens/秒): 19710.29
112
+ TTFT (V1 metrics): 0.9604 s
113
+ 解码吞吐 (V1 metrics): 20.56 tok/s
114
+
115
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
116
+ [rank0]:[W813 18:53:54.532255598 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
std_traverse_bs/traverse_bs_util_std.log ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ WARNING: CPU IP/backtrace sampling not supported, disabling.
2
+ Try the 'nsys status --environment' command to learn more.
3
+
4
+ WARNING: CPU context switch tracing not supported, disabling.
5
+ Try the 'nsys status --environment' command to learn more.
6
+
7
+ INFO 08-13 19:02:19 [__init__.py:235] Automatically detected platform cuda.
8
+ CUDA_VISIBLE_DEVICES = 3
9
+ --- vLLM V1 基准测试(含 NVTX 标记)---
10
+ 模型: Qwen/Qwen2-1.5B
11
+ 批量大小: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
12
+ 场景: ['prefill640_decode512']
13
+ ------------------------------------------------------------
14
+ 加载分词器/模型中...
15
+ INFO 08-13 19:02:29 [config.py:1604] Using max model len 4096
16
+ INFO 08-13 19:02:29 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
17
+ INFO 08-13 19:02:35 [__init__.py:235] Automatically detected platform cuda.
18
+ INFO 08-13 19:02:37 [core.py:572] Waiting for init message from front-end.
19
+ INFO 08-13 19:02:37 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
20
+ INFO 08-13 19:02:40 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
21
+ WARNING 08-13 19:02:40 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
22
+ INFO 08-13 19:02:40 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
23
+ INFO 08-13 19:02:40 [gpu_model_runner.py:1875] Loading model from scratch...
24
+ INFO 08-13 19:02:40 [cuda.py:290] Using Flash Attention backend on V1 engine.
25
+ INFO 08-13 19:02:40 [weight_utils.py:296] Using model weights format ['*.safetensors']
26
+ INFO 08-13 19:02:41 [weight_utils.py:349] No model.safetensors.index.json found in remote.
27
+
28
+
29
+
30
+
31
+ INFO 08-13 19:02:42 [default_loader.py:262] Loading weights took 0.75 seconds
32
+ INFO 08-13 19:02:42 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 1.965894 seconds
33
+ INFO 08-13 19:02:48 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
34
+ INFO 08-13 19:02:48 [backends.py:541] Dynamo bytecode transform time: 6.08 s
35
+ INFO 08-13 19:02:53 [backends.py:194] Cache the graph for dynamic shape for later use
36
+ INFO 08-13 19:03:14 [backends.py:215] Compiling a graph for dynamic shape takes 25.33 s
37
+ INFO 08-13 19:03:21 [monitor.py:34] torch.compile takes 31.41 s in total
38
+ INFO 08-13 19:03:22 [gpu_worker.py:255] Available KV cache memory: 12.80 GiB
39
+ INFO 08-13 19:03:22 [kv_cache_utils.py:833] GPU KV cache size: 479,456 tokens
40
+ INFO 08-13 19:03:22 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.05x
41
+
42
+ INFO 08-13 19:03:25 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
43
+ INFO 08-13 19:03:25 [core.py:193] init engine (profile, create kv cache, warmup model) took 42.37 seconds
44
+ 模型加载完成。
45
+
46
+ ===== 场景:prefill640_decode512 | prefill=640, decode=512 =====
47
+
48
+ --- 批量大小 bs=1 ---
49
+
50
+
51
+ 执行时间: 3.3360 s
52
+ 实际平均输入 tokens: 640.00(目标 640)
53
+ 生成总 tokens: 512
54
+ 吞吐(生成tokens/秒): 153.48
55
+ TTFT (V1 metrics): 0.0327 s
56
+ 解码吞吐 (V1 metrics): 154.93 tok/s
57
+
58
+ --- 批量大小 bs=2 ---
59
+
60
+
61
+ 执行时间: 3.7300 s
62
+ 实际平均输入 tokens: 640.00(目标 640)
63
+ 生成总 tokens: 1024
64
+ 吞吐(生成tokens/秒): 274.53
65
+ TTFT (V1 metrics): 0.0158 s
66
+ 解码吞吐 (V1 metrics): 137.84 tok/s
67
+
68
+ --- 批量大小 bs=4 ---
69
+
70
+
71
+ 执行时间: 3.6164 s
72
+ 实际平均输入 tokens: 640.00(目标 640)
73
+ 生成总 tokens: 2048
74
+ 吞吐(生成tokens/秒): 566.30
75
+ TTFT (V1 metrics): 0.0169 s
76
+ 解码吞吐 (V1 metrics): 142.63 tok/s
77
+
78
+ --- 批量大小 bs=8 ---
79
+
80
+
81
+ 执行时间: 3.7265 s
82
+ 实际平均输入 tokens: 640.00(目标 640)
83
+ 生成总 tokens: 4096
84
+ 吞吐(生成tokens/秒): 1099.15
85
+ TTFT (V1 metrics): 0.0219 s
86
+ 解码吞吐 (V1 metrics): 138.89 tok/s
87
+
88
+ --- 批量大小 bs=16 ---
89
+
90
+
91
+ 执行时间: 3.8919 s
92
+ 实际平均输入 tokens: 640.00(目标 640)
93
+ 生成总 tokens: 8192
94
+ 吞吐(生成tokens/秒): 2104.89
95
+ TTFT (V1 metrics): 0.0329 s
96
+ 解码吞吐 (V1 metrics): 133.82 tok/s
97
+
98
+ --- 批量大小 bs=32 ---
99
+
100
+
101
+ 执行时间: 4.0341 s
102
+ 实际平均输入 tokens: 640.00(目标 640)
103
+ 生成总 tokens: 16384
104
+ 吞吐(生成tokens/秒): 4061.41
105
+ TTFT (V1 metrics): 0.0461 s
106
+ 解码吞吐 (V1 metrics): 130.12 tok/s
107
+
108
+ --- 批量大小 bs=64 ---
109
+
110
+
111
+ 执行时间: 4.4199 s
112
+ 实际平均输入 tokens: 640.00(目标 640)
113
+ 生成总 tokens: 32768
114
+ 吞吐(生成tokens/秒): 7413.77
115
+ TTFT (V1 metrics): 0.0691 s
116
+ 解码吞吐 (V1 metrics): 120.00 tok/s
117
+
118
+ --- 批量大小 bs=128 ---
119
+
120
+
121
+ 执行时间: 6.2947 s
122
+ 实际平均输入 tokens: 640.00(目标 640)
123
+ 生成总 tokens: 65421
124
+ 吞吐(生成tokens/秒): 10393.02
125
+ TTFT (V1 metrics): 0.1218 s
126
+ 解码吞吐 (V1 metrics): 84.64 tok/s
127
+
128
+ --- 批量大小 bs=256 ---
129
+
130
+
131
+ 执行时间: 9.2625 s
132
+ 实际平均输入 tokens: 640.00(目标 640)
133
+ 生成总 tokens: 131072
134
+ 吞吐(生成tokens/秒): 14150.76
135
+ TTFT (V1 metrics): 0.4813 s
136
+ 解码吞吐 (V1 metrics): 59.00 tok/s
137
+
138
+ --- 批量大小 bs=512 ---
139
+
140
+
141
+ 执行时间: 14.1481 s
142
+ 实际平均输入 tokens: 640.00(目标 640)
143
+ 生成总 tokens: 262144
144
+ 吞吐(生成tokens/秒): 18528.59
145
+ TTFT (V1 metrics): 0.4908 s
146
+ 解码吞吐 (V1 metrics): 38.46 tok/s
147
+
148
+ --- 批量大小 bs=1024 ---
149
+
150
+
151
+ [rank0]:[W813 19:04:51.071947265 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
152
+ 执行时间: 27.7908 s
153
+ 实际平均输入 tokens: 640.00(目标 640)
154
+ 生成总 tokens: 524288
155
+ 吞吐(生成tokens/秒): 18865.49
156
+ TTFT (V1 metrics): 0.9638 s
157
+ 解码吞吐 (V1 metrics): 19.72 tok/s
158
+
159
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
160
+ GPU 3: General Metrics for NVIDIA AD10x (any frequency)
161
+ Generating '/tmp/nsys-report-e7ae.qdstrm'
162
+
163
+
164
+ [3/8] Executing 'nvtx_sum' stats report
165
+
166
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Style Range
167
+ -------- --------------- --------- ---------------- ---------------- -------------- -------------- ----------- ------- ----------------------------------------
168
+ 43.1 63,806,267,138 1 63,806,267,138.0 63,806,267,138.0 63,806,267,138 63,806,267,138 0.0 PushPop :LLM_init
169
+ 18.8 27,790,304,411 1 27,790,304,411.0 27,790,304,411.0 27,790,304,411 27,790,304,411 0.0 PushPop :generate [prefill640_decode512] bs=1024
170
+ 9.6 14,147,468,287 1 14,147,468,287.0 14,147,468,287.0 14,147,468,287 14,147,468,287 0.0 PushPop :generate [prefill640_decode512] bs=512
171
+ 6.3 9,262,392,366 1 9,262,392,366.0 9,262,392,366.0 9,262,392,366 9,262,392,366 0.0 PushPop :generate [prefill640_decode512] bs=256
172
+ 4.3 6,294,556,076 1 6,294,556,076.0 6,294,556,076.0 6,294,556,076 6,294,556,076 0.0 PushPop :generate [prefill640_decode512] bs=128
173
+ 3.0 4,419,734,921 1 4,419,734,921.0 4,419,734,921.0 4,419,734,921 4,419,734,921 0.0 PushPop :generate [prefill640_decode512] bs=64
174
+ 2.7 4,033,922,062 1 4,033,922,062.0 4,033,922,062.0 4,033,922,062 4,033,922,062 0.0 PushPop :generate [prefill640_decode512] bs=32
175
+ 2.6 3,891,757,396 1 3,891,757,396.0 3,891,757,396.0 3,891,757,396 3,891,757,396 0.0 PushPop :generate [prefill640_decode512] bs=16
176
+ 2.5 3,729,817,085 1 3,729,817,085.0 3,729,817,085.0 3,729,817,085 3,729,817,085 0.0 PushPop :generate [prefill640_decode512] bs=2
177
+ 2.5 3,726,348,651 1 3,726,348,651.0 3,726,348,651.0 3,726,348,651 3,726,348,651 0.0 PushPop :generate [prefill640_decode512] bs=8
178
+ 2.4 3,616,307,172 1 3,616,307,172.0 3,616,307,172.0 3,616,307,172 3,616,307,172 0.0 PushPop :generate [prefill640_decode512] bs=4
179
+ 2.3 3,335,871,818 1 3,335,871,818.0 3,335,871,818.0 3,335,871,818 3,335,871,818 0.0 PushPop :generate [prefill640_decode512] bs=1
180
+ 0.0 88,217 2 44,108.5 44,108.5 42,206 46,011 2,690.5 PushPop CCCL:cub::DeviceSegmentedRadixSort
181
+
182
+ [4/8] Executing 'osrt_sum' stats report
183
+
184
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
185
+ -------- ----------------- --------- --------------- ---------------- --------- --------------- --------------- ----------------------
186
+ 30.2 1,744,937,975,193 54,559 31,982,587.2 36,252.0 1,000 131,944,469,826 1,264,387,583.2 pthread_cond_timedwait
187
+ 24.4 1,409,850,293,508 90,937 15,503,593.6 10,063,900.0 1,000 89,656,403,165 542,281,129.3 epoll_wait
188
+ 24.3 1,405,293,461,182 2,062 681,519,622.3 169,137.0 1,777 131,946,128,306 9,309,452,118.6 pthread_cond_wait
189
+ 8.2 470,649,973,426 67 7,024,626,469.0 10,000,075,590.0 8,879 10,000,128,605 4,563,470,551.8 sem_timedwait
190
+ 7.6 440,451,323,826 42,747 10,303,678.0 1,400.0 1,000 13,214,170,856 151,629,150.0 poll
191
+ 2.6 148,761,603,454 44,756 3,323,836.0 2,218.0 1,000 130,016,250,506 617,021,890.8 read
192
+ 2.4 141,326,431,455 11,558 12,227,585.3 7,211,303.0 21,424 658,160,680 14,124,156.6 sem_wait
193
+ 0.1 5,907,485,407 725 8,148,255.7 1,042.0 1,000 442,887,066 44,916,303.4 waitpid
194
+ 0.0 1,252,683,205 505,872 2,476.3 1,440.0 1,006 94,581,342 133,009.9 munmap
195
+ 0.0 907,262,068 353 2,570,147.5 1,227,045.0 1,197 26,263,430 3,108,782.5 pthread_rwlock_wrlock
196
+ 0.0 708,053,362 172 4,116,589.3 615,554.5 3,112 29,565,907 7,196,056.2 pthread_join
197
+ 0.0 327,596,329 10,080 32,499.6 10,962.5 1,000 29,220,721 379,953.4 ioctl
198
+ 0.0 261,805,946 495 528,900.9 2,972.0 1,077 19,958,090 3,072,110.4 fopen
199
+ 0.0 160,631,905 36,713 4,375.3 3,385.0 1,000 1,692,951 12,061.0 mmap64
200
+ 0.0 150,219,603 6,263 23,985.2 8,256.0 1,010 2,585,639 128,799.4 pthread_mutex_lock
201
+ 0.0 126,650,615 25 5,066,024.6 5,065,286.0 5,053,603 5,077,238 7,361.7 nanosleep
202
+ 0.0 99,487,929 31,988 3,110.2 2,516.0 1,000 75,099 2,795.8 open64
203
+ 0.0 90,344,763 9,083 9,946.6 4,712.0 1,148 2,648,924 36,587.2 recv
204
+ 0.0 84,823,390 9,082 9,339.7 5,735.5 1,468 89,916 8,146.6 send
205
+ 0.0 78,685,390 43,321 1,816.3 1,662.0 1,000 354,334 2,967.5 pthread_cond_signal
206
+ 0.0 69,793,423 5,751 12,135.9 2,001.0 1,035 19,751,200 420,727.9 open
207
+ 0.0 66,948,530 15,954 4,196.3 2,557.0 1,010 575,245 10,386.1 write
208
+ 0.0 50,999,387 10 5,099,938.7 19,477.5 9,808 50,802,125 16,058,116.0 connect
209
+ 0.0 18,140,344 11,019 1,646.3 1,411.0 1,000 24,064 709.4 epoll_ctl
210
+ 0.0 18,080,692 280 64,573.9 51,655.5 16,288 578,620 60,626.8 pthread_create
211
+ 0.0 9,776,017 147 66,503.5 68,572.0 55,588 85,413 5,091.0 sleep
212
+ 0.0 7,076,518 18 393,139.9 383,949.5 262,109 584,262 85,377.7 posix_spawn
213
+ 0.0 6,601,831 899 7,343.5 5,539.0 1,010 84,493 9,244.8 fgets
214
+ 0.0 5,962,720 22 271,032.7 169,725.0 16,478 675,901 237,522.4 pthread_rwlock_rdlock
215
+ 0.0 3,506,178 342 10,252.0 2,354.5 1,006 207,881 30,027.2 pthread_cond_broadcast
216
+ 0.0 3,013,330 1,417 2,126.6 1,083.0 1,008 110,583 7,412.5 fclose
217
+ 0.0 2,975,781 1,319 2,256.1 1,598.0 1,000 22,362 1,984.6 stat
218
+ 0.0 2,885,075 692 4,169.2 4,026.5 1,238 45,085 4,258.3 fopen64
219
+ 0.0 2,145,928 345 6,220.1 3,291.0 1,000 51,101 8,060.8 fread
220
+ 0.0 1,950,425 65 30,006.5 3,106.0 1,201 261,266 71,483.9 futex
221
+ 0.0 1,877,146 336 5,586.7 4,406.0 1,002 83,435 6,148.2 mmap
222
+ 0.0 1,741,859 102 17,077.0 4,155.5 1,030 432,266 67,240.1 fwrite
223
+ 0.0 1,680,806 1,203 1,397.2 1,251.0 1,000 8,322 493.0 fstat
224
+ 0.0 1,075,803 1 1,075,803.0 1,075,803.0 1,075,803 1,075,803 0.0 fork
225
+ 0.0 721,566 99 7,288.5 6,229.0 2,560 17,976 3,711.4 pipe2
226
+ 0.0 595,674 19 31,351.3 5,092.0 4,006 383,239 86,162.0 putc
227
+ 0.0 248,534 41 6,061.8 4,850.0 1,628 18,035 4,032.6 socket
228
+ 0.0 183,085 115 1,592.0 1,513.0 1,001 2,876 429.3 sigaction
229
+ 0.0 167,627 16 10,476.7 2,835.5 1,086 55,140 16,437.5 bind
230
+ 0.0 115,447 8 14,430.9 6,418.0 3,542 41,638 15,571.8 fputs
231
+ 0.0 93,879 16 5,867.4 5,035.0 1,640 14,112 3,769.0 lstat
232
+ 0.0 60,214 6 10,035.7 10,003.0 9,288 10,680 531.1 getc
233
+ 0.0 49,588 27 1,836.6 1,677.0 1,019 2,939 596.6 dup2
234
+ 0.0 47,084 37 1,272.5 1,112.0 1,002 3,284 431.2 fcntl
235
+ 0.0 37,610 24 1,567.1 1,488.5 1,021 2,512 357.1 signal
236
+ 0.0 35,001 5 7,000.2 7,792.0 3,662 10,502 2,759.1 accept4
237
+ 0.0 31,541 9 3,504.6 4,424.0 1,078 6,400 2,269.5 fflush
238
+ 0.0 15,432 4 3,858.0 3,690.5 2,526 5,525 1,477.8 flock
239
+ 0.0 15,311 11 1,391.9 1,301.0 1,125 1,918 254.0 listen
240
+ 0.0 14,040 8 1,755.0 1,552.0 1,310 3,235 642.4 pread
241
+ 0.0 12,841 3 4,280.3 4,425.0 3,921 4,495 313.2 fputs_unlocked
242
+ 0.0 12,488 5 2,497.6 2,496.0 2,206 2,947 282.0 mprotect
243
+ 0.0 10,666 1 10,666.0 10,666.0 10,666 10,666 0.0 dup
244
+ 0.0 6,561 3 2,187.0 1,727.0 1,636 3,198 876.7 flockfile
245
+ 0.0 6,208 1 6,208.0 6,208.0 6,208 6,208 0.0 kill
246
+ 0.0 3,788 2 1,894.0 1,894.0 1,355 2,433 762.3 openat64
247
+ 0.0 2,317 2 1,158.5 1,158.5 1,025 1,292 188.8 pthread_mutex_trylock
248
+
249
+ [5/8] Executing 'cuda_api_sum' stats report
250
+
251
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
252
+ -------- --------------- --------- ----------- ----------- -------- ----------- ----------- ------------------------------------------
253
+ 54.0 18,079,332,597 62,830 287,750.0 8,213.0 2,790 112,775,333 1,983,727.7 cudaMemcpyAsync
254
+ 21.3 7,151,454,905 1,151,462 6,210.8 4,908.0 775 60,344,468 100,074.1 cudaLaunchKernel
255
+ 13.9 4,659,698,239 3,031 1,537,346.8 36,322.0 1,646 137,811,508 5,892,401.8 cudaDeviceSynchronize
256
+ 5.7 1,902,754,251 154,454 12,319.2 10,944.0 7,132 6,919,461 39,439.6 cudaGraphLaunch_v10000
257
+ 2.4 817,695,167 151,595 5,393.9 4,998.0 606 8,288,395 63,229.0 cuLaunchKernel
258
+ 0.7 225,761,461 1,943 116,192.2 74,685.0 38,766 1,503,454 195,876.4 cudaGraphInstantiateWithFlags_v11040
259
+ 0.4 145,677,030 27,893 5,222.7 5,401.0 170 3,577,542 23,514.0 cudaMemsetAsync
260
+ 0.4 123,078,496 156,852 784.7 749.0 293 28,187 217.2 cudaStreamIsCapturing_v10000
261
+ 0.2 80,818,259 41,653 1,940.3 1,901.0 1,681 230,930 1,185.3 cudaEventRecord
262
+ 0.2 70,123,080 11,007 6,370.8 2,991.0 1,576 11,464,127 117,490.1 cudaStreamSynchronize
263
+ 0.2 54,985,787 222 247,683.7 127,094.5 70,289 2,353,583 356,501.3 cudaFree
264
+ 0.1 39,808,657 349 114,064.9 107,734.0 9,252 1,028,648 56,280.0 cudaMalloc
265
+ 0.1 33,850,909 41,671 812.3 782.0 275 186,043 928.6 cudaEventCreateWithFlags
266
+ 0.1 25,030,442 10 2,503,044.2 2,591,457.0 57,483 4,465,642 1,429,904.6 cuLibraryLoadData
267
+ 0.1 20,138,251 281 71,666.4 73,047.0 25,875 416,759 45,705.2 cuModuleLoadData
268
+ 0.1 18,115,827 41,653 434.9 403.0 338 226,830 1,887.5 cudaEventDestroy
269
+ 0.1 17,924,935 16,808 1,066.5 493.0 261 6,344,359 50,228.8 cuKernelGetFunction
270
+ 0.0 9,263,417 18,895 490.3 467.0 322 6,477 105.4 cudaStreamGetCaptureInfo_v2_v11030
271
+ 0.0 7,974,210 1,943 4,104.1 4,022.0 3,214 9,676 584.5 cudaStreamBeginCapture_v10000
272
+ 0.0 7,518,878 1,943 3,869.7 3,828.0 2,357 7,833 536.7 cudaGraphDestroy_v10000
273
+ 0.0 3,416,827 128 26,694.0 2,299.0 1,471 1,153,703 140,616.2 cudaStreamCreateWithPriority
274
+ 0.0 2,744,082 1,943 1,412.3 1,389.0 1,050 7,178 196.6 cudaStreamEndCapture_v10000
275
+ 0.0 1,570,575 1,943 808.3 739.0 614 2,547 251.2 cudaGraphGetNodes_v10000
276
+ 0.0 1,322,243 15 88,149.5 6,436.0 3,579 1,170,830 300,044.6 cudaHostAlloc
277
+ 0.0 280,352 8 35,044.0 26,955.5 12,673 101,212 28,421.5 cudaMemGetInfo
278
+ 0.0 138,906 810 171.5 140.0 79 1,705 118.0 cuGetProcAddress_v2
279
+ 0.0 23,009 16 1,438.1 808.5 451 5,531 1,508.2 cuLibraryGetKernel
280
+ 0.0 8,159 14 582.8 544.5 324 990 193.4 cudaThreadExchangeStreamCaptureMode_v10010
281
+ 0.0 4,031 1 4,031.0 4,031.0 4,031 4,031 0.0 cudaStreamWaitEvent
282
+ 0.0 3,969 3 1,323.0 1,051.0 1,031 1,887 488.5 cuInit
283
+ 0.0 3,693 4 923.3 916.5 75 1,785 960.8 cuModuleGetLoadingMode
284
+ 0.0 1,064 2 532.0 532.0 356 708 248.9 cudaGetDriverEntryPoint_v11030
285
+
286
+ [6/8] Executing 'cuda_gpu_kern_sum' stats report
287
+
288
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
289
+ -------- --------------- --------- ----------- ----------- --------- --------- ----------- ----------------------------------------------------------------------------------------------------
290
+ 30.6 11,421,290,038 118,048 96,751.2 42,337.0 12,320 576,069 124,175.4 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
291
+ 20.4 7,605,306,813 28,807 264,009.0 265,858.0 32,961 765,542 115,223.0 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
292
+ 8.8 3,277,208,753 47,634 68,799.8 77,473.0 800 81,121 21,559.5 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
293
+ 5.6 2,083,505,861 1,271 1,639,265.0 1,387,852.0 39,745 4,515,436 1,121,994.0 ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
294
+ 5.1 1,891,072,505 20,210 93,571.1 20,224.0 1,055 481,762 169,672.7 triton_poi_fused_mul_silu_1
295
+ 4.3 1,624,497,751 101,584 15,991.7 8,544.0 6,367 81,025 11,832.8 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
296
+ 3.3 1,229,556,187 9,203 133,603.8 42,977.0 7,648 557,317 169,265.9 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
297
+ 2.5 940,467,368 5,865 160,352.5 13,312.0 1,984 1,008,298 288,283.2 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
298
+ 2.2 808,325,696 2,044 395,462.7 496,644.0 10,592 510,980 192,958.7 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
299
+ 1.9 720,826,362 5,867 122,861.1 9,824.0 5,120 714,183 210,401.9 void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
300
+ 1.8 666,198,895 287,392 2,318.1 1,920.0 1,536 6,368 968.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
301
+ 1.6 609,901,106 6,048 100,843.4 32,161.0 6,912 3,159,614 305,220.1 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
302
+ 1.5 576,753,139 13,496 42,735.1 42,624.0 26,016 102,081 3,914.6 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
303
+ 1.3 487,869,104 13,020 37,470.7 37,440.0 36,640 42,816 321.1 ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
304
+ 1.2 431,150,928 22,086 19,521.5 3,489.0 1,344 75,104 27,792.8 triton_poi_fused_cat_3
305
+ 0.9 347,479,864 341 1,019,002.5 598,630.0 373,475 2,788,687 741,262.9 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
306
+ 0.8 316,815,668 1,904 166,394.8 154,193.5 40,352 1,291,207 149,498.4 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
307
+ 0.7 264,631,581 521 507,930.1 507,844.0 506,244 519,973 763.9 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x1_tn_align8>(T1::Par…
308
+ 0.7 258,705,730 610 424,107.8 487,972.0 6,976 500,003 159,920.1 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
309
+ 0.7 248,913,221 164,164 1,516.2 1,280.0 1,023 13,249 475.7 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
310
+ 0.6 206,205,114 57,848 3,564.6 3,584.0 3,295 3,936 112.4 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
311
+ 0.5 203,900,076 143,696 1,419.0 1,344.0 1,183 2,208 221.7 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
312
+ 0.5 189,670,922 16,968 11,178.2 12,256.0 1,536 111,105 5,147.2 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
313
+ 0.5 173,673,906 22,086 7,863.5 2,368.0 832 25,696 9,575.0 triton_poi_fused_view_5
314
+ 0.5 170,442,702 61,516 2,770.7 2,432.0 1,247 17,088 1,351.4 void vllm::merge_attn_states_kernel<__nv_bfloat16, (unsigned int)128>(T1 *, float *, const T1 *, co…
315
+ 0.3 95,480,145 22,086 4,323.1 1,376.0 1,215 15,104 5,013.1 triton_poi_fused_cat_4
316
+ 0.2 88,249,892 16,968 5,201.0 5,473.0 1,535 80,096 3,526.8 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
317
+ 0.2 58,167,224 15,232 3,818.8 3,840.0 3,711 4,032 31.4 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
318
+ 0.1 45,137,635 14,420 3,130.2 3,167.0 3,008 3,457 59.7 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
319
+ 0.1 43,394,155 14,084 3,081.1 3,072.0 3,008 3,200 19.7 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
320
+ 0.1 39,138,907 8 4,892,363.4 4,849,279.0 4,795,198 5,088,513 110,842.2 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
321
+ 0.1 22,694,961 784 28,947.7 12,575.5 11,744 62,528 20,606.8 ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
322
+ 0.1 20,952,447 5,999 3,492.7 3,104.0 2,751 7,392 961.1 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
323
+ 0.1 20,383,074 4 5,095,768.5 5,085,968.5 4,915,359 5,295,778 195,206.9 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
324
+ 0.0 15,859,055 818 19,387.6 3,136.0 1,600 74,881 27,973.6 triton_poi_fused_cat_1
325
+ 0.0 14,288,503 28 510,303.7 511,683.0 469,923 513,059 7,932.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
326
+ 0.0 9,813,089 4 2,453,272.3 2,468,672.5 2,391,504 2,484,240 42,132.0 void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
327
+ 0.0 9,653,448 448 21,547.9 21,345.0 21,120 24,928 817.3 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
328
+ 0.0 9,408,780 5,863 1,604.8 1,376.0 1,120 2,752 455.9 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
329
+ 0.0 8,489,112 224 37,897.8 37,856.0 36,545 39,328 536.2 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
330
+ 0.0 8,311,916 28 296,854.1 294,593.5 293,505 332,482 7,194.4 ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
331
+ 0.0 7,845,574 9,023 869.5 864.0 767 1,281 77.2 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
332
+ 0.0 7,775,377 2 3,887,688.5 3,887,688.5 3,705,239 4,070,138 258,022.6 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
333
+ 0.0 7,471,240 5,865 1,273.9 1,120.0 991 2,080 282.3 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
334
+ 0.0 7,214,254 336 21,471.0 21,408.0 21,056 22,625 299.8 ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
335
+ 0.0 6,324,707 818 7,731.9 2,368.0 863 24,832 9,334.7 triton_poi_fused_view_3
336
+ 0.0 6,169,925 476 12,962.0 12,864.0 11,776 14,304 602.5 ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
337
+ 0.0 5,903,909 4 1,475,977.3 1,475,449.0 1,472,585 1,480,426 3,980.7 void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
338
+ 0.0 5,501,036 5,863 938.3 928.0 895 1,312 71.9 void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
339
+ 0.0 4,936,334 28 176,297.6 176,817.5 174,209 178,978 1,446.6 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_relu_f2f_tn
340
+ 0.0 4,754,998 5,432 875.4 864.0 800 1,217 36.3 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
341
+ 0.0 3,995,930 2 1,997,965.0 1,997,965.0 1,996,108 1,999,822 2,626.2 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
342
+ 0.0 3,598,526 56 64,259.4 64,320.5 63,104 65,313 559.8 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
343
+ 0.0 3,541,818 818 4,329.9 1,376.0 1,215 14,815 5,022.4 triton_poi_fused_cat_2
344
+ 0.0 3,434,102 4 858,525.5 858,069.5 855,685 862,278 2,939.8 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
345
+ 0.0 3,192,725 2 1,596,362.5 1,596,362.5 1,563,050 1,629,675 47,111.0 void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
346
+ 0.0 2,897,906 1,512 1,916.6 1,824.0 1,312 2,976 438.1 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
347
+ 0.0 2,593,403 606 4,279.5 4,384.0 1,984 35,904 1,445.1 triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
348
+ 0.0 2,582,288 2 1,291,144.0 1,291,144.0 1,290,536 1,291,752 859.8 at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
349
+ 0.0 2,580,625 2 1,290,312.5 1,290,312.5 1,288,393 1,292,232 2,714.6 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
350
+ 0.0 2,421,451 112 21,620.1 21,552.0 9,376 34,400 12,006.5 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
351
+ 0.0 1,377,256 2 688,628.0 688,628.0 682,820 694,436 8,213.8 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
352
+ 0.0 1,128,024 1,252 901.0 896.0 800 1,280 45.2 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
353
+ 0.0 957,446 28 34,194.5 34,768.5 17,983 35,232 3,184.5 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
354
+ 0.0 670,018 731 916.6 928.0 863 1,024 22.0 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<long>, std::array<char *, (uns…
355
+ 0.0 296,898 168 1,767.3 1,760.0 1,536 2,080 121.8 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
356
+ 0.0 157,249 1 157,249.0 157,249.0 157,249 157,249 0.0 void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
357
+ 0.0 90,491 86 1,052.2 927.5 895 11,488 1,139.9 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
358
+ 0.0 78,785 1 78,785.0 78,785.0 78,785 78,785 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
359
+ 0.0 43,232 1 43,232.0 43,232.0 43,232 43,232 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
360
+ 0.0 36,737 28 1,312.0 1,312.0 1,280 1,344 17.4 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
361
+ 0.0 26,432 1 26,432.0 26,432.0 26,432 26,432 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
362
+ 0.0 19,520 1 19,520.0 19,520.0 19,520 19,520 0.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
363
+ 0.0 11,713 11 1,064.8 864.0 800 1,536 305.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
364
+ 0.0 10,624 2 5,312.0 5,312.0 5,024 5,600 407.3 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
365
+ 0.0 8,639 2 4,319.5 4,319.5 4,128 4,511 270.8 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
366
+ 0.0 3,616 2 1,808.0 1,808.0 1,600 2,016 294.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
367
+ 0.0 3,489 2 1,744.5 1,744.5 1,696 1,793 68.6 void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
368
+ 0.0 3,103 2 1,551.5 1,551.5 1,503 1,600 68.6 void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
369
+ 0.0 2,976 2 1,488.0 1,488.0 1,376 1,600 158.4 void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
370
+ 0.0 2,975 2 1,487.5 1,487.5 991 1,984 702.2 void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
371
+ 0.0 2,944 2 1,472.0 1,472.0 1,344 1,600 181.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
372
+ 0.0 2,400 1 2,400.0 2,400.0 2,400 2,400 0.0 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
373
+ 0.0 1,185 1 1,185.0 1,185.0 1,185 1,185 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
374
+ 0.0 1,025 1 1,025.0 1,025.0 1,025 1,025 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
375
+ 0.0 1,025 1 1,025.0 1,025.0 1,025 1,025 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
376
+ 0.0 896 1 896.0 896.0 896 896 0.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
377
+
378
+ [7/8] Executing 'cuda_gpu_mem_time_sum' stats report
379
+
380
+ Time (%) Total Time (ns) Count Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Operation
381
+ -------- --------------- ------ -------- -------- -------- ----------- ----------- ------------------------------
382
+ 93.8 627,226,731 42,463 14,771.1 352.0 320 112,333,155 587,539.7 [CUDA memcpy Host-to-Device]
383
+ 2.8 18,735,373 14,448 1,296.7 928.0 895 1,362,505 22,615.1 [CUDA memcpy Device-to-Device]
384
+ 2.4 16,204,705 24,393 664.3 768.0 320 8,224 282.8 [CUDA memset]
385
+ 1.0 6,719,471 5,919 1,135.2 1,120.0 863 1,920 102.9 [CUDA memcpy Device-to-Host]
386
+
387
+ [8/8] Executing 'cuda_gpu_mem_size_sum' stats report
388
+
389
+ Total (MB) Count Avg (MB) Med (MB) Min (MB) Max (MB) StdDev (MB) Operation
390
+ ---------- ------ -------- -------- -------- -------- ----------- ------------------------------
391
+ 4,194.770 42,463 0.099 0.000 0.000 466.747 2.582 [CUDA memcpy Host-to-Device]
392
+ 2,533.618 14,448 0.175 0.003 0.000 622.330 10.354 [CUDA memcpy Device-to-Device]
393
+ 17.613 24,393 0.001 0.001 0.000 0.006 0.000 [CUDA memset]
394
+ 4.192 5,919 0.001 0.000 0.000 0.004 0.001 [CUDA memcpy Device-to-Host]
395
+
396
+ Generated:
397
+ /data/cy/kv_cache_vs_util/std_traverse_bs/traverse_bs_util_std.nsys-rep
398
+ /data/cy/kv_cache_vs_util/std_traverse_bs/traverse_bs_util_std.sqlite
std_traverse_bs/traverse_bs_util_std.nsys-rep ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdc4881551bfa6904baceba8d047d0fc2738308fbadb2d1c3d42a14840041bd6
3
+ size 134872655
std_traverse_bs/traverse_bs_util_std.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import statistics
4
+ from typing import List, Tuple, Dict
5
+
6
+ import torch
7
+ import torch.cuda.nvtx as nvtx
8
+
9
+ from vllm import LLM, SamplingParams
10
+ from transformers import AutoTokenizer
11
+
12
# ========= Force the vLLM V1 engine =========
os.environ.setdefault("VLLM_USE_V1", "1")
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

# Optional: turn on V1 metrics/stats logging
os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")
18
+
19
# ========= Try to import the V1 metrics types (compatible across vLLM versions) =========
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
except Exception:
    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy placeholder class
24
+
25
# ========= Configuration =========
MODEL_NAME = "Qwen/Qwen2-1.5B"
DTYPE = "bfloat16"
TP = 1  # tensor-parallel degree
GPU_MEM_UTIL = 0.90
TRUST_REMOTE_CODE = True

# Scenarios: prefill = input tokens, decode = output tokens
SCENARIOS = [
    # {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
    # {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
    {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
]

BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]

SEED = 1234
TEMPERATURE = 0.0  # greedy decoding
TOP_P = 1.0
WARMUP_PER_BS = 1  # one warmup per batch size (warmup is commented out in main below)
45
+
46
+ # ========= 构造“精确 token 数量”的 prompt =========
47
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Build a prompt whose tokenized length is exactly *target_len*.

    Binary-searches a character prefix of a long synthetic text until its
    token count matches, falling back to trimming/padding tokens, then
    decodes and re-verifies the final length.
    """
    def _encode(s):
        return tokenizer(s, add_special_tokens=False)["input_ids"]

    def _decode(ids):
        return tokenizer.decode(ids, skip_special_tokens=True,
                                clean_up_tokenization_spaces=False)

    if target_len <= 1:
        # Degenerate case: use one simple token (avoids an empty prompt).
        one = _encode("A")
        if len(one) >= 1:
            return _decode(one[:1])

    corpus = (
        "You are a helpful assistant. "
        "Please analyze the following input and respond succinctly. "
    ) + (" ".join(["data"] * 100) + ". ") * 200  # plenty of text to slice

    low, high = 0, len(corpus)
    exact = None
    while low <= high:
        cut = (low + high) // 2
        cand = _encode(corpus[:cut])
        count = len(cand)
        if count == target_len:
            exact = cand
            break
        if count < target_len:
            low = cut + 1
        else:
            high = cut - 1

    if exact is None:
        # No prefix hit the length exactly: trim, or pad word-by-word.
        cand = _encode(corpus[:low])
        if len(cand) > target_len:
            exact = cand[:target_len]
        else:
            while len(cand) < target_len:
                cand = _encode(tokenizer.decode(cand) + " data")
            exact = cand[:target_len]

    prompt = _decode(exact)
    # Round-trip check: the decoded prompt must still tokenize to target_len.
    assert len(_encode(prompt)) == target_len
    return prompt
88
+
89
# ========= V1 metrics extraction helpers =========
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency
92
+
93
+ def _iter_children_of_vector(vec_obj):
94
+ for attr in ("children", "metrics", "series", "values", "samples", "items"):
95
+ if hasattr(vec_obj, attr):
96
+ val = getattr(vec_obj, attr)
97
+ if isinstance(val, dict):
98
+ for v in val.values():
99
+ yield v
100
+ else:
101
+ try:
102
+ for v in val:
103
+ yield v
104
+ except TypeError:
105
+ pass
106
+
107
def _collect_hist_sum_count(metrics, metric_name: str):
    """Aggregate (sum, count) over all Histogram samples named *metric_name*.

    Handles both plain Histogram metrics and Vector metrics whose children
    are Histograms; the class-name comparison keeps this working even when
    the real vllm metric types could not be imported.
    """
    def _is(obj, cls, cls_name):
        return isinstance(obj, cls) or obj.__class__.__name__ == cls_name

    acc_sum = 0.0
    acc_cnt = 0.0
    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue
        if _is(metric, Histogram, "Histogram"):
            # Plain histogram: fold its cumulative sum/count straight in.
            acc_sum += float(getattr(metric, "sum", 0.0))
            acc_cnt += float(getattr(metric, "count", 0.0))
        elif _is(metric, Vector, "Vector"):
            # Vector[Histogram]: walk each child series.
            for child in _iter_children_of_vector(metric):
                if _is(child, Histogram, "Histogram"):
                    acc_sum += float(getattr(child, "sum", 0.0))
                    acc_cnt += float(getattr(child, "count", 0.0))
    return acc_sum, acc_cnt
126
+
127
+ def _metrics_snapshot(llm) -> Dict[str, float]:
128
+ try:
129
+ mets = llm.get_metrics() # V1: 返回 Metric 列表(包含 Histogram/Vector 等)
130
+ except Exception:
131
+ return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
132
+ ttft_sum, ttft_cnt = _collect_hist_sum_count(mets, TTFT_METRIC_NAME)
133
+ tpot_sum, tpot_cnt = _collect_hist_sum_count(mets, TPOT_METRIC_NAME)
134
+ return {"ttft_sum": ttft_sum, "ttft_cnt": ttft_cnt, "tpot_sum": tpot_sum, "tpot_cnt": tpot_cnt}
135
+
136
+ def _metrics_delta(before: dict, after: dict):
137
+ return {
138
+ "ttft_sum": after["ttft_sum"] - before["ttft_sum"],
139
+ "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
140
+ "tpot_sum": after["tpot_sum"] - before["tpot_sum"],
141
+ "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
142
+ }
143
+
144
+ # ========= 带 NVTX 的 generate 包装 =========
145
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
    """Thin wrapper around ``llm.generate`` (kept as a profiling hook point)."""
    outputs = llm.generate(prompts, params)
    return outputs
147
+
148
+ # ========= 统计格式化 =========
149
def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
    """Return (mean, median, p90) of *x* with NaN entries dropped.

    All three values are NaN when nothing survives the filter.
    """
    clean = [v for v in x if v == v]  # NaN is the only value where v != v
    if not clean:
        nan = float("nan")
        return (nan, nan, nan)
    p90 = statistics.quantiles(clean, n=10)[-1]
    return (statistics.mean(clean), statistics.median(clean), p90)
154
+
155
def main():
    """Run the benchmark sweep.

    For every scenario and batch size, generate once with vLLM and report
    wall-clock throughput plus TTFT/TPOT derived from V1 metrics deltas.
    """
    print("--- vLLM V1 基准测试(含 NVTX 标记)---")
    print(f"模型: {MODEL_NAME}")
    print(f"批量大小: {BATCH_SIZES}")
    print(f"场景: {[s['name'] for s in SCENARIOS]}")
    print("-" * 60)

    if not torch.cuda.is_available():
        print("错误:需要 CUDA GPU。")
        return

    print("加载分词器/模型中...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)

    # Mark the model-loading phase with NVTX so it is easy to find in Nsight.
    nvtx.range_push("LLM_init")
    llm = LLM(
        model=MODEL_NAME,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=TRUST_REMOTE_CODE,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=1024,  # large enough to cover this sweep's batch sizes
        max_model_len=4096,
        disable_log_stats=False,  # keep V1 metrics collection enabled
    )
    nvtx.range_pop()
    print("模型加载完成。")

    for sc in SCENARIOS:
        name = sc["name"]
        prompt_tokens = sc["prompt_tokens"]
        max_new_tokens = sc["max_new_tokens"]

        print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")

        # Build a prompt with the exact target token count.
        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)

        # Sampling parameters (greedy decoding).
        sampling_params = SamplingParams(
            max_tokens=max_new_tokens,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            seed=SEED,
            n=1,
        )

        # One timed run per batch size (results printed for external parsing).
        for bs in BATCH_SIZES:
            print(f"\n--- 批量大小 bs={bs} ---")

            prompts = [prompt_text] * bs

            # Warmup (deliberately disabled in this variant).
            # print("预热中...")
            # nvtx.range_push(f"WARMUP [{name}] bs={bs}")
            # _ = decorated_generate(llm, [prompts[0]], sampling_params)
            # torch.cuda.synchronize()
            # nvtx.range_pop()

            # Timed run bracketed by V1 metrics snapshots.
            # nvtx.range_push(f"RUN [{name}] bs={bs}")
            torch.cuda.synchronize()
            snap_before = _metrics_snapshot(llm)
            t0 = time.perf_counter()

            nvtx.range_push(f"generate [{name}] bs={bs}")
            outputs = decorated_generate(llm, prompts, sampling_params)
            nvtx.range_pop()  # generate

            torch.cuda.synchronize()
            t1 = time.perf_counter()
            snap_after = _metrics_snapshot(llm)
            # nvtx.range_pop()  # RUN

            duration = t1 - t0

            # Token counts and wall-clock throughput.
            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
            throughput = total_output_tokens / duration if duration > 0 else float("inf")

            # Derive TTFT / decode throughput from the V1 metrics delta.
            delta = _metrics_delta(snap_before, snap_after)
            if delta["ttft_cnt"] > 0:
                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            else:
                ttft = float("nan")

            if delta["tpot_cnt"] > 0:
                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds/token
                decode_tps = 1.0 / avg_tpot
            else:
                decode_tps = float("nan")

            print(f"执行时间: {duration:.4f} s")
            print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
            print(f"生成总 tokens: {total_output_tokens}")
            print(f"吞吐(生成tokens/秒): {throughput:.2f}")
            print(f"TTFT (V1 metrics): {ttft:.4f} s")
            print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")

    print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
259
+
260
if __name__ == "__main__":
    # Log which GPUs are visible before kicking off the benchmark.
    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
    main()
traverse_bs_util.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import statistics
4
+ from typing import List, Tuple, Dict
5
+
6
+ import torch
7
+ import torch.cuda.nvtx as nvtx
8
+
9
+ from vllm import LLM, SamplingParams
10
+ from transformers import AutoTokenizer
11
+
12
# ========= Force the vLLM V1 engine =========
os.environ.setdefault("VLLM_USE_V1", "1")
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

# Optional: turn on V1 metrics/stats logging
os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")
18
+
19
# ========= Try to import the V1 metrics types (compatible across vLLM versions) =========
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
except Exception:
    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy placeholder class
24
+
25
# ========= Configuration =========
MODEL_NAME = "Qwen/Qwen2-1.5B"
DTYPE = "bfloat16"
TP = 1  # tensor-parallel degree
GPU_MEM_UTIL = 0.90
TRUST_REMOTE_CODE = True

# Scenarios: prefill = input tokens, decode = output tokens
SCENARIOS = [
    {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
    {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
]

BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256]

SEED = 1234
TEMPERATURE = 0.0  # greedy decoding
TOP_P = 1.0
WARMUP_PER_BS = 1  # one warmup run per batch size
44
+
45
+ # ========= 构造“精确 token 数量”的 prompt =========
46
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Build a prompt whose tokenized length is exactly *target_len*.

    Binary-searches a character prefix of a long synthetic text until its
    token count matches, falling back to trimming/padding tokens, then
    decodes and re-verifies the final length.
    """
    def _encode(s):
        return tokenizer(s, add_special_tokens=False)["input_ids"]

    def _decode(ids):
        return tokenizer.decode(ids, skip_special_tokens=True,
                                clean_up_tokenization_spaces=False)

    if target_len <= 1:
        # Degenerate case: use one simple token (avoids an empty prompt).
        one = _encode("A")
        if len(one) >= 1:
            return _decode(one[:1])

    corpus = (
        "You are a helpful assistant. "
        "Please analyze the following input and respond succinctly. "
    ) + (" ".join(["data"] * 100) + ". ") * 200  # plenty of text to slice

    low, high = 0, len(corpus)
    exact = None
    while low <= high:
        cut = (low + high) // 2
        cand = _encode(corpus[:cut])
        count = len(cand)
        if count == target_len:
            exact = cand
            break
        if count < target_len:
            low = cut + 1
        else:
            high = cut - 1

    if exact is None:
        # No prefix hit the length exactly: trim, or pad word-by-word.
        cand = _encode(corpus[:low])
        if len(cand) > target_len:
            exact = cand[:target_len]
        else:
            while len(cand) < target_len:
                cand = _encode(tokenizer.decode(cand) + " data")
            exact = cand[:target_len]

    prompt = _decode(exact)
    # Round-trip check: the decoded prompt must still tokenize to target_len.
    assert len(_encode(prompt)) == target_len
    return prompt
87
+
88
# ========= V1 metrics extraction helpers =========
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency
91
+
92
+ def _iter_children_of_vector(vec_obj):
93
+ for attr in ("children", "metrics", "series", "values", "samples", "items"):
94
+ if hasattr(vec_obj, attr):
95
+ val = getattr(vec_obj, attr)
96
+ if isinstance(val, dict):
97
+ for v in val.values():
98
+ yield v
99
+ else:
100
+ try:
101
+ for v in val:
102
+ yield v
103
+ except TypeError:
104
+ pass
105
+
106
def _collect_hist_sum_count(metrics, metric_name: str):
    """Aggregate (sum, count) over all Histogram samples named *metric_name*.

    Handles both plain Histogram metrics and Vector metrics whose children
    are Histograms; the class-name comparison keeps this working even when
    the real vllm metric types could not be imported.
    """
    def _is(obj, cls, cls_name):
        return isinstance(obj, cls) or obj.__class__.__name__ == cls_name

    acc_sum = 0.0
    acc_cnt = 0.0
    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue
        if _is(metric, Histogram, "Histogram"):
            # Plain histogram: fold its cumulative sum/count straight in.
            acc_sum += float(getattr(metric, "sum", 0.0))
            acc_cnt += float(getattr(metric, "count", 0.0))
        elif _is(metric, Vector, "Vector"):
            # Vector[Histogram]: walk each child series.
            for child in _iter_children_of_vector(metric):
                if _is(child, Histogram, "Histogram"):
                    acc_sum += float(getattr(child, "sum", 0.0))
                    acc_cnt += float(getattr(child, "count", 0.0))
    return acc_sum, acc_cnt
125
+
126
+ def _metrics_snapshot(llm) -> Dict[str, float]:
127
+ try:
128
+ mets = llm.get_metrics() # V1: 返回 Metric 列表(包含 Histogram/Vector 等)
129
+ except Exception:
130
+ return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
131
+ ttft_sum, ttft_cnt = _collect_hist_sum_count(mets, TTFT_METRIC_NAME)
132
+ tpot_sum, tpot_cnt = _collect_hist_sum_count(mets, TPOT_METRIC_NAME)
133
+ return {"ttft_sum": ttft_sum, "ttft_cnt": ttft_cnt, "tpot_sum": tpot_sum, "tpot_cnt": tpot_cnt}
134
+
135
+ def _metrics_delta(before: dict, after: dict):
136
+ return {
137
+ "ttft_sum": after["ttft_sum"] - before["ttft_sum"],
138
+ "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
139
+ "tpot_sum": after["tpot_sum"] - before["tpot_sum"],
140
+ "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
141
+ }
142
+
143
+ # ========= 带 NVTX 的 generate 包装 =========
144
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
    """Thin wrapper around ``llm.generate`` (kept as a profiling hook point)."""
    outputs = llm.generate(prompts, params)
    return outputs
146
+
147
+ # ========= 统计格式化 =========
148
def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
    """Return (mean, median, p90) of *x* with NaN entries dropped.

    All three values are NaN when nothing survives the filter.
    """
    clean = [v for v in x if v == v]  # NaN is the only value where v != v
    if not clean:
        nan = float("nan")
        return (nan, nan, nan)
    p90 = statistics.quantiles(clean, n=10)[-1]
    return (statistics.mean(clean), statistics.median(clean), p90)
153
+
154
def main():
    """Run the benchmark sweep.

    For every scenario and batch size, warm up once, then generate with
    vLLM and report wall-clock throughput plus TTFT/TPOT derived from V1
    metrics deltas.
    """
    print("--- vLLM V1 基准测试(含 NVTX 标记)---")
    print(f"模型: {MODEL_NAME}")
    print(f"批量大小: {BATCH_SIZES}")
    print(f"场景: {[s['name'] for s in SCENARIOS]}")
    print("-" * 60)

    if not torch.cuda.is_available():
        print("错误:需要 CUDA GPU。")
        return

    print("加载分词器/模型中...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)

    # Mark the model-loading phase with NVTX so it is easy to find in Nsight.
    nvtx.range_push("LLM_init")
    llm = LLM(
        model=MODEL_NAME,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=TRUST_REMOTE_CODE,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=256,  # large enough to cover this sweep's batch sizes
        max_model_len=8192,
        disable_log_stats=False,  # keep V1 metrics collection enabled
    )
    nvtx.range_pop()
    print("模型加载完成。")

    for sc in SCENARIOS:
        name = sc["name"]
        prompt_tokens = sc["prompt_tokens"]
        max_new_tokens = sc["max_new_tokens"]

        print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")

        # Build a prompt with the exact target token count.
        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)

        # Sampling parameters (greedy decoding).
        sampling_params = SamplingParams(
            max_tokens=max_new_tokens,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            seed=SEED,
            n=1,
        )

        # One warmup + one timed run per batch size.
        for bs in BATCH_SIZES:
            print(f"\n--- 批量大小 bs={bs} ---")

            prompts = [prompt_text] * bs

            # Warmup: a single-prompt generate so first-call setup costs
            # do not pollute the timed run below.
            print("预热中...")
            nvtx.range_push(f"WARMUP [{name}] bs={bs}")
            _ = decorated_generate(llm, [prompts[0]], sampling_params)
            torch.cuda.synchronize()
            nvtx.range_pop()

            # Timed run bracketed by V1 metrics snapshots.
            nvtx.range_push(f"RUN [{name}] bs={bs}")
            torch.cuda.synchronize()
            snap_before = _metrics_snapshot(llm)
            t0 = time.perf_counter()

            nvtx.range_push(f"generate [{name}] bs={bs}")
            outputs = decorated_generate(llm, prompts, sampling_params)
            nvtx.range_pop()  # generate

            torch.cuda.synchronize()
            t1 = time.perf_counter()
            snap_after = _metrics_snapshot(llm)
            nvtx.range_pop()  # RUN

            duration = t1 - t0

            # Token counts and wall-clock throughput.
            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
            throughput = total_output_tokens / duration if duration > 0 else float("inf")

            # Derive TTFT / decode throughput from the V1 metrics delta.
            delta = _metrics_delta(snap_before, snap_after)
            if delta["ttft_cnt"] > 0:
                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            else:
                ttft = float("nan")

            if delta["tpot_cnt"] > 0:
                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds/token
                decode_tps = 1.0 / avg_tpot
            else:
                decode_tps = float("nan")

            print(f"执行时间: {duration:.4f} s")
            print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
            print(f"生成总 tokens: {total_output_tokens}")
            print(f"吞吐(生成tokens/秒): {throughput:.2f}")
            print(f"TTFT (V1 metrics): {ttft:.4f} s")
            print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")

    print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
258
+
259
if __name__ == "__main__":
    # Log which GPUs are visible before kicking off the benchmark.
    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
    main()
traverse_bs_util_std_kvcache.py ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import statistics
4
+ from typing import List, Tuple, Dict
5
+
6
+ import torch
7
+ import torch.cuda.nvtx as nvtx
8
+
9
+ from vllm import LLM, SamplingParams
10
+ from transformers import AutoTokenizer
11
+
12
# ========= Force the vLLM V1 engine =========
os.environ.setdefault("VLLM_USE_V1", "1")
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

# Optional: turn on V1 metrics/stats logging
os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")
18
+
19
# ========= Try to import the V1 metrics types (compatible across vLLM versions) =========
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
except Exception:
    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy placeholder class
24
+
25
# ========= Configuration =========
MODEL_NAME = "Qwen/Qwen2-1.5B"
DTYPE = "bfloat16"
TP = 1  # tensor-parallel degree
GPU_MEM_UTIL = 0.90
TRUST_REMOTE_CODE = True

# Scenarios: prefill = input tokens, decode = output tokens
SCENARIOS = [
    # {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
    # {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
    {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
]

BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256]

SEED = 1234
TEMPERATURE = 0.0  # greedy decoding
TOP_P = 1.0
WARMUP_PER_BS = 1  # one warmup run per batch size
45
+
46
+ # ========= 构造“精确 token 数量”的 prompt =========
47
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Build a prompt whose tokenized length is exactly *target_len*.

    Binary-searches a character prefix of a long synthetic text until its
    token count matches, falling back to trimming/padding tokens, then
    decodes and re-verifies the final length.
    """
    def _encode(s):
        return tokenizer(s, add_special_tokens=False)["input_ids"]

    def _decode(ids):
        return tokenizer.decode(ids, skip_special_tokens=True,
                                clean_up_tokenization_spaces=False)

    if target_len <= 1:
        # Degenerate case: use one simple token (avoids an empty prompt).
        one = _encode("A")
        if len(one) >= 1:
            return _decode(one[:1])

    corpus = (
        "You are a helpful assistant. "
        "Please analyze the following input and respond succinctly. "
    ) + (" ".join(["data"] * 100) + ". ") * 200  # plenty of text to slice

    low, high = 0, len(corpus)
    exact = None
    while low <= high:
        cut = (low + high) // 2
        cand = _encode(corpus[:cut])
        count = len(cand)
        if count == target_len:
            exact = cand
            break
        if count < target_len:
            low = cut + 1
        else:
            high = cut - 1

    if exact is None:
        # No prefix hit the length exactly: trim, or pad word-by-word.
        cand = _encode(corpus[:low])
        if len(cand) > target_len:
            exact = cand[:target_len]
        else:
            while len(cand) < target_len:
                cand = _encode(tokenizer.decode(cand) + " data")
            exact = cand[:target_len]

    prompt = _decode(exact)
    # Round-trip check: the decoded prompt must still tokenize to target_len.
    assert len(_encode(prompt)) == target_len
    return prompt
88
+
89
# ========= V1 metrics extraction helpers =========
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency
92
+
93
+ def _iter_children_of_vector(vec_obj):
94
+ for attr in ("children", "metrics", "series", "values", "samples", "items"):
95
+ if hasattr(vec_obj, attr):
96
+ val = getattr(vec_obj, attr)
97
+ if isinstance(val, dict):
98
+ for v in val.values():
99
+ yield v
100
+ else:
101
+ try:
102
+ for v in val:
103
+ yield v
104
+ except TypeError:
105
+ pass
106
+
107
def _collect_hist_sum_count(metrics, metric_name: str):
    """Aggregate (sum, count) over all Histogram samples named *metric_name*.

    Handles both plain Histogram metrics and Vector metrics whose children
    are Histograms; the class-name comparison keeps this working even when
    the real vllm metric types could not be imported.
    """
    def _is(obj, cls, cls_name):
        return isinstance(obj, cls) or obj.__class__.__name__ == cls_name

    acc_sum = 0.0
    acc_cnt = 0.0
    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue
        if _is(metric, Histogram, "Histogram"):
            # Plain histogram: fold its cumulative sum/count straight in.
            acc_sum += float(getattr(metric, "sum", 0.0))
            acc_cnt += float(getattr(metric, "count", 0.0))
        elif _is(metric, Vector, "Vector"):
            # Vector[Histogram]: walk each child series.
            for child in _iter_children_of_vector(metric):
                if _is(child, Histogram, "Histogram"):
                    acc_sum += float(getattr(child, "sum", 0.0))
                    acc_cnt += float(getattr(child, "count", 0.0))
    return acc_sum, acc_cnt
126
+
127
def _metrics_snapshot(llm) -> Dict[str, float]:
    """Capture cumulative TTFT/TPOT histogram totals from V1 metrics.

    Returns a dict with keys ttft_sum/ttft_cnt/tpot_sum/tpot_cnt; all
    zeros if the engine does not expose get_metrics() (pre-V1 builds).
    """
    try:
        # V1: returns a list of Metric objects (Histogram / Vector / ...)
        metric_objs = llm.get_metrics()
    except Exception:
        return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
    ttft_sum, ttft_cnt = _collect_hist_sum_count(metric_objs, TTFT_METRIC_NAME)
    tpot_sum, tpot_cnt = _collect_hist_sum_count(metric_objs, TPOT_METRIC_NAME)
    return {"ttft_sum": ttft_sum, "ttft_cnt": ttft_cnt,
            "tpot_sum": tpot_sum, "tpot_cnt": tpot_cnt}
135
+
136
def _metrics_delta(before: dict, after: dict):
    """Element-wise difference (after - before) of two metric snapshots."""
    keys = ("ttft_sum", "ttft_cnt", "tpot_sum", "tpot_cnt")
    return {key: after[key] - before[key] for key in keys}
143
+
144
# ========= Memory / KV cache reporting helpers =========
def _try_import_pynvml():
    """Import and initialise NVML; return the module, or None if unavailable."""
    try:
        import pynvml  # type: ignore
    except Exception:
        return None
    try:
        pynvml.nvmlInit()
    except Exception:
        return None
    return pynvml

def _gpu_mem_stats(device_idx: int = 0):
    """Collect GPU memory figures for one device, formatted as MiB strings.

    Reports both the PyTorch allocator view (allocated/reserved + peaks)
    and, when NVML is importable, the driver view (close to nvidia-smi).
    Values that cannot be obtained are reported as the string "NaN".
    """
    nan = float("nan")

    # PyTorch allocator view
    try:
        torch.cuda.synchronize(device_idx)
        torch_vals = (
            torch.cuda.memory_allocated(device_idx),
            torch.cuda.memory_reserved(device_idx),
            torch.cuda.max_memory_allocated(device_idx),
            torch.cuda.max_memory_reserved(device_idx),
        )
    except Exception:
        torch_vals = (nan, nan, nan, nan)
    allocated, reserved, max_alloc, max_resv = torch_vals

    # NVML view (close to what nvidia-smi shows)
    nvml_total = nvml_used = nvml_free = nan
    nvml = _try_import_pynvml()
    if nvml is not None:
        try:
            handle = nvml.nvmlDeviceGetHandleByIndex(device_idx)
            info = nvml.nvmlDeviceGetMemoryInfo(handle)
            nvml_total = float(info.total)
            nvml_used = float(info.used)
            nvml_free = float(info.free)
        except Exception:
            pass

    def _as_mib(num_bytes):
        # NaN compares unequal to itself
        if num_bytes != num_bytes:
            return "NaN"
        return f"{num_bytes/1024/1024:.1f} MiB"

    return {
        "torch_allocated": _as_mib(allocated),
        "torch_reserved": _as_mib(reserved),
        "torch_max_alloc": _as_mib(max_alloc),
        "torch_max_resvd": _as_mib(max_resv),
        "nvml_total": _as_mib(nvml_total),
        "nvml_used": _as_mib(nvml_used),
        "nvml_free": _as_mib(nvml_free),
    }
190
+
191
# —— Latest value of a Gauge/Counter metric from vLLM v1 metrics (Vector-wrapped too) ——
def _collect_latest_metric(metrics, metric_name: str):
    """Return the most recently seen numeric value published as metric_name.

    Scans plain Gauge/Counter metrics and Gauge/Counter children inside
    Vector wrappers (classes matched by name, as concrete types vary by
    vLLM version). The last match in iteration order wins; None if absent.
    """

    def _numeric_value(obj):
        # `value` is the public field; some builds only expose `_value`.
        raw = getattr(obj, "value", None)
        if raw is None:
            raw = getattr(obj, "_value", None)
        return float(raw) if isinstance(raw, (int, float)) else None

    newest = None
    for metric in metrics:
        cls_name = metric.__class__.__name__
        is_vec = cls_name == "Vector"
        if getattr(metric, "name", None) != metric_name:
            # Only a Vector holding a matching child may pass this filter.
            wraps_target = is_vec and any(
                getattr(child, "name", None) == metric_name
                for child in _iter_children_of_vector(metric)
            )
            if not wraps_target:
                continue
        # Direct Gauge/Counter
        if cls_name in ("Gauge", "Counter"):
            candidate = _numeric_value(metric)
            if candidate is not None:
                newest = candidate
        # Vector[Gauge/Counter]
        if is_vec:
            for child in _iter_children_of_vector(metric):
                if (getattr(child, "name", None) == metric_name
                        and child.__class__.__name__ in ("Gauge", "Counter")):
                    candidate = _numeric_value(child)
                    if candidate is not None:
                        newest = candidate
    return newest
216
+
217
# —— Best-effort KV cache usage read from vLLM v1 metrics ——
def _kv_cache_stats_from_metrics(llm):
    """Probe the metrics endpoint for KV-cache block/byte usage.

    Tries several candidate metric names (naming differs across vLLM
    versions). Returns a dict with used_blocks / total_blocks /
    utilization / used_bytes (any of which may be None), or None when
    get_metrics() itself is unavailable.
    """
    try:
        metric_objs = llm.get_metrics()
    except Exception:
        return None

    # Candidate metric names — cover as many naming schemes as possible.
    used_block_names = (
        "vllm:kv_cache_gpu_blocks_in_use",
        "vllm:kv_cache:gpu_blocks_in_use",
        "vllm:cache:gpu_blocks_in_use",
        "vllm:num_gpu_blocks_in_use",
    )
    total_block_names = (
        "vllm:kv_cache_gpu_blocks_total",
        "vllm:kv_cache:gpu_blocks_total",
        "vllm:cache:gpu_blocks_total",
        "vllm:num_gpu_blocks_total",
    )
    used_byte_names = (
        "vllm:kv_cache_usage_bytes",
        "vllm:kv_cache:usage_bytes",
        "vllm:cache:kv_bytes_used",
    )

    def _first_hit(names):
        # First candidate name that actually yields a value wins.
        for candidate_name in names:
            found = _collect_latest_metric(metric_objs, candidate_name)
            if found is not None:
                return found
        return None

    used_blocks = _first_hit(used_block_names)
    total_blocks = _first_hit(total_block_names)
    used_bytes = _first_hit(used_byte_names)

    utilization = None
    if used_blocks is not None and total_blocks and total_blocks > 0:
        utilization = used_blocks / total_blocks

    return {
        "used_blocks": used_blocks,
        "total_blocks": total_blocks,
        "utilization": (f"{utilization*100:.1f}%" if utilization is not None else None),
        "used_bytes": (f"{used_bytes/1024/1024:.1f} MiB" if used_bytes is not None else None),
    }
273
+
274
# —— Fallback: read KV-cache limits from internal engine config (version-dependent) ——
def _kv_cache_stats_from_engine(llm):
    """Read block-capacity figures straight from the engine's cache config.

    Only the capacity (total blocks, block size) is available from config,
    never current usage. Returns None whenever the internal layout does not
    match expectations — this is deliberately tolerant of version drift.
    """
    try:
        engine = getattr(llm, "llm_engine", None)
        if engine is None:
            return None
        # Config usually lives at engine.scheduler.cache_config or engine.cache_config.
        holder = getattr(engine, "scheduler", engine)
        cache_cfg = getattr(holder, "cache_config", None) or getattr(engine, "cache_config", None)
        if cache_cfg is None:
            return None
        total = getattr(cache_cfg, "num_gpu_blocks", None)
        tokens_per_block = getattr(cache_cfg, "block_size", None)  # usually tokens per block
        # Accurate byte sizing would need layer/head/dtype details, so only
        # the block dimension is reported to avoid misleading numbers.
        return {
            "used_blocks": None,  # config exposes capacity, not current usage
            "total_blocks": float(total) if total is not None else None,
            "utilization": None,
            "used_bytes": None,
            "notes": f"block_size={tokens_per_block} (tokens per block)" if tokens_per_block is not None else None,
        }
    except Exception:
        return None
296
+
297
def _print_mem_and_kv(llm, header: str):
    """Print a GPU-memory + KV-cache status report, prefixed with *header*."""
    print(f"[MEM/KV] {header}")

    # GPU memory: PyTorch allocator view first, then NVML when available.
    mem = _gpu_mem_stats(0)
    print(f" PyTorch allocated/reserved: {mem['torch_allocated']} / {mem['torch_reserved']} "
          f"(peak {mem['torch_max_alloc']} / {mem['torch_max_resvd']})")
    if mem["nvml_total"] != "NaN":
        print(f" NVML total/used/free: {mem['nvml_total']} / {mem['nvml_used']} / {mem['nvml_free']}")

    # KV cache: prefer metrics, fall back to engine config.
    kv = _kv_cache_stats_from_metrics(llm)
    if kv is None:
        kv = _kv_cache_stats_from_engine(llm)

    if kv is None:
        print(" KV cache: 未能获取(该 vLLM 版本可能未暴露对应指标)")
        return

    fragments = []
    if kv.get("used_blocks") is not None:
        fragments.append(f"used_blocks={int(kv['used_blocks'])}")
    if kv.get("total_blocks") is not None:
        fragments.append(f"total_blocks={int(kv['total_blocks'])}")
    if kv.get("utilization") is not None:
        fragments.append(f"util={kv['utilization']}")
    if kv.get("used_bytes") is not None:
        fragments.append(f"used≈{kv['used_bytes']}")
    if kv.get("notes"):
        fragments.append(kv["notes"])
    if fragments:
        print(" KV cache: " + ", ".join(fragments))
    else:
        print(" KV cache: 指标存在但内容为空/不兼容")
328
+
329
# ========= generate wrapper (NVTX ranges are pushed around the call site) =========
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
    """Forward to llm.generate; a named wrapper keeps one clear profiler frame."""
    outputs = llm.generate(prompts, params)
    return outputs
332
+
333
# ========= Statistics formatting =========
def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
    """Summarise a series of samples as (mean, median, p90).

    NaN entries are dropped first. Returns (nan, nan, nan) for an empty
    series. With a single remaining sample, that sample is returned for
    all three positions — statistics.quantiles() requires at least two
    data points and would otherwise raise StatisticsError.
    """
    xs = [v for v in x if (v == v)]  # NaN != NaN, so this filters NaNs
    if not xs:
        nan = float("nan")
        return (nan, nan, nan)
    if len(xs) == 1:
        # quantiles() needs >= 2 samples; a lone sample is its own p90.
        return (xs[0], xs[0], xs[0])
    # quantiles(n=10)[-1] is the 90th percentile (exclusive method).
    return (statistics.mean(xs), statistics.median(xs), statistics.quantiles(xs, n=10)[-1])
339
+
340
def main():
    """Run the benchmark sweep.

    For each scenario and batch size: build an exact-length prompt,
    generate with vLLM, time the call, and report wall-clock throughput,
    TTFT/TPOT derived from V1 metrics deltas, and memory / KV-cache
    snapshots. Relies on module-level config (MODEL_NAME, BATCH_SIZES,
    SCENARIOS, TP, DTYPE, GPU_MEM_UTIL, TEMPERATURE, TOP_P, SEED,
    TRUST_REMOTE_CODE) and the helper functions defined above.
    Requires a CUDA GPU; returns early otherwise.
    """
    print("--- vLLM V1 基准测试(含 NVTX 标记)---")
    print(f"模型: {MODEL_NAME}")
    print(f"批量大小: {BATCH_SIZES}")
    print(f"场景: {[s['name'] for s in SCENARIOS]}")
    print("-" * 60)

    if not torch.cuda.is_available():
        print("错误:需要 CUDA GPU。")
        return

    print("加载分词器/模型中...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)

    # Mark the model-load phase with an NVTX range.
    nvtx.range_push("LLM_init")
    llm = LLM(
        model=MODEL_NAME,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=TRUST_REMOTE_CODE,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=256,  # large enough to cover every batch size in this sweep
        max_model_len=8192,
        disable_log_stats=False,  # keep stats on so V1 metrics are collected
    )
    nvtx.range_pop()
    print("模型加载完成。")
    # Baseline memory / KV-cache snapshot right after init.
    _print_mem_and_kv(llm, "after LLM_init")

    for sc in SCENARIOS:
        name = sc["name"]
        prompt_tokens = sc["prompt_tokens"]
        max_new_tokens = sc["max_new_tokens"]

        print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")

        # Build a prompt with the exact target token count.
        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)

        # Sampling parameters (greedy-style, fixed seed).
        sampling_params = SamplingParams(
            max_tokens=max_new_tokens,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            seed=SEED,
            n=1,
        )

        # One timed run per batch size; results are printed for later parsing.
        for bs in BATCH_SIZES:
            print(f"\n--- 批量大小 bs={bs} ---")

            prompts = [prompt_text] * bs

            # Warm-up (enable if needed).
            # print("预热中...")
            # nvtx.range_push(f"WARMUP [{name}] bs={bs}")
            # _ = decorated_generate(llm, [prompts[0]], sampling_params)
            # torch.cuda.synchronize()
            # nvtx.range_pop()

            # Timed run plus V1 metrics snapshots around it.
            # nvtx.range_push(f"RUN [{name}] bs={bs}")
            torch.cuda.synchronize()
            snap_before = _metrics_snapshot(llm)
            t0 = time.perf_counter()

            nvtx.range_push(f"generate [{name}] bs={bs}")
            outputs = decorated_generate(llm, prompts, sampling_params)
            nvtx.range_pop()  # generate

            torch.cuda.synchronize()
            t1 = time.perf_counter()
            snap_after = _metrics_snapshot(llm)
            # nvtx.range_pop()  # RUN

            duration = t1 - t0

            # Token counts and wall-clock throughput.
            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
            throughput = total_output_tokens / duration if duration > 0 else float("inf")

            # Derive TTFT / decode throughput from the V1 metrics delta.
            delta = _metrics_delta(snap_before, snap_after)
            if delta["ttft_cnt"] > 0:
                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            else:
                ttft = float("nan")

            if delta["tpot_cnt"] > 0:
                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds/token
                decode_tps = 1.0 / avg_tpot
            else:
                decode_tps = float("nan")

            print(f"执行时间: {duration:.4f} s")
            print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
            print(f"生成总 tokens: {total_output_tokens}")
            print(f"吞吐(生成tokens/秒): {throughput:.2f}")
            print(f"TTFT (V1 metrics): {ttft:.4f} s")
            print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")

            # Memory / KV-cache snapshot after this batch.
            _print_mem_and_kv(llm, f"after generate [{name}] bs={bs}")

    print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
449
+
450
if __name__ == "__main__":
    # Echo device visibility so profiler logs record which GPU(s) were used.
    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
    main()
常用命令 ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ hf upload Hamerlate/tts . --repo-type=dataset
2
+
3
+ nsys profile -w true -t cuda,nvtx,osrt --force-overwrite=true --stats=true --gpu-metrics-device=3 -x true -o traverse_bs_util_std python3 /data/cy/kv_cache_vs_util/traverse_bs_util_std.py > traverse_bs_util_std.log 2>&1
4
+
5
+ export CUDA_VISIBLE_DEVICES=3
6
+
7
+ nsys profile -w true -t cuda,nvtx,osrt --force-overwrite=true --stats=true --gpu-metrics-device=3 -x true -o traverse_bs_util_std python3 /data/cy/kv_cache_vs_util/traverse_bs_util_std.py > traverse_bs_util_std.log 2>&1
8
+
9
+ nsys profile -w true -t cuda,nvtx,osrt --force-overwrite=true --stats=true --gpu-metrics-device=3 -x true -o traverse_bs_util_std python3 /data/cy/kv_cache_vs_util/std_traverse_bs/traverse_bs_util_std.py > traverse_bs_util_std.log 2>&1
10
+
11
+ nsys profile -w true -t cuda,nvtx,osrt --force-overwrite=true --stats=true --gpu-metrics-device=3 -x true -o traverse_bs_util_sim_prefill python3 /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill.py > traverse_bs_util_sim_prefill.log 2>&1
12
+
13
+ nsys profile -w true -t cuda,nvtx,osrt --force-overwrite=true --stats=true --gpu-metrics-device=3 -x true -o traverse_bs_util_sim_decoding python3 /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding.py > traverse_bs_util_sim_decoding.log 2>&1
14
+
15
+ hf upload Hamerlate/tts std_traverse_bs sim_traverse_bs --repo-type=dataset
16
+
17
+ hf upload Hamerlate/tts sim_traverse_bs sim_traverse_bs --repo-type=dataset --exclude '*.sqlite'
18
+
19
+ hf upload Hamerlate/tts sim_traverse_bs --repo-type=dataset --exclude '*.sqlite'
20
+
21
+ nsys profile -w true -t cuda,nvtx,osrt --force-overwrite=true --stats=true --gpu-metrics-device=3 -x true -o traverse_bs_util_sim_prefill_1152 python3 /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_prefill.py > traverse_bs_util_sim_prefill_1152.log 2>&1
22
+
23
+ nsys profile -w true -t cuda,nvtx,osrt --force-overwrite=true --stats=true --gpu-metrics-device=3 -x true -o traverse_bs_util_sim_decoding_1024 python3 /data/cy/kv_cache_vs_util/sim_traverse_bs/traverse_bs_util_sim_decoding.py > traverse_bs_util_sim_decoding_1024.log 2>&1