Hamerlate commited on
Commit
34779f9
·
verified ·
1 Parent(s): b41ee71

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -64,3 +64,4 @@ vllm_tts_N64.nsys-rep filter=lfs diff=lfs merge=lfs -text
64
  qwen_util_bs.nsys-rep filter=lfs diff=lfs merge=lfs -text
65
  traverse_bs.nsys-rep filter=lfs diff=lfs merge=lfs -text
66
  sim_traverse_bs/traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
 
 
64
  qwen_util_bs.nsys-rep filter=lfs diff=lfs merge=lfs -text
65
  traverse_bs.nsys-rep filter=lfs diff=lfs merge=lfs -text
66
  sim_traverse_bs/traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
67
+ traverse_bs_util_std.nsys-rep filter=lfs diff=lfs merge=lfs -text
terminal.log ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 模型加载完成。
2
+
3
+ ===== 场景:prefill640_decode512 | prefill=640, decode=512 =====
4
+
5
+ --- 批量大小 bs=1 ---
6
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 431.65it/s]
7
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████████| 1/1 [00:02<00:00, 2.57s/it, est. speed input: 249.31 toks/s, output: 199.44 toks/s]
8
+ 执行时间: 2.5731 s
9
+ 实际平均输入 tokens: 640.00(目标 640)
10
+ 生成总 tokens: 512
11
+ 吞吐(生成tokens/秒): 198.98
12
+ TTFT (V1 metrics): 0.0238 s
13
+ 解码吞吐 (V1 metrics): 200.69 tok/s
14
+
15
+ --- 批量大小 bs=2 ---
16
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 202.04it/s]
17
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████████| 2/2 [00:02<00:00, 1.36s/it, est. speed input: 470.56 toks/s, output: 376.45 toks/s]
18
+ 执行时间: 2.7325 s
19
+ 实际平均输入 tokens: 640.00(目标 640)
20
+ 生成总 tokens: 1024
21
+ 吞吐(生成tokens/秒): 374.75
22
+ TTFT (V1 metrics): 0.0129 s
23
+ 解码吞吐 (V1 metrics): 188.36 tok/s
24
+
25
+ --- 批量大小 bs=4 ---
26
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 210.51it/s]
27
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████████| 4/4 [00:03<00:00, 1.33it/s, est. speed input: 852.39 toks/s, output: 681.91 toks/s]
28
+ 执行时间: 3.0248 s
29
+ 实际平均输入 tokens: 640.00(目标 640)
30
+ 生成总 tokens: 2048
31
+ 吞吐(生成tokens/秒): 677.07
32
+ TTFT (V1 metrics): 0.0164 s
33
+ 解码吞吐 (V1 metrics): 170.68 tok/s
34
+
35
+ --- 批量大小 bs=8 ---
36
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 8/8 [00:00<00:00, 229.89it/s]
37
+ Processed prompts: 100%|█████████████████████████████████████████████████████████████████| 8/8 [00:03<00:00, 2.59it/s, est. speed input: 1658.80 toks/s, output: 1327.03 toks/s]
38
+ 执行时间: 3.1236 s
39
+ 实际平均输入 tokens: 640.00(目标 640)
40
+ 生成总 tokens: 4096
41
+ 吞吐(生成tokens/秒): 1311.31
42
+ TTFT (V1 metrics): 0.0219 s
43
+ 解码吞吐 (V1 metrics): 165.93 tok/s
44
+
45
+ --- 批量大小 bs=16 ---
46
+ Adding requests: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 16/16 [00:00<00:00, 259.80it/s]
47
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████| 16/16 [00:03<00:00, 5.28it/s, est. speed input: 3376.77 toks/s, output: 2701.40 toks/s]
48
+ 执行时间: 3.0966 s
49
+ 实际平均输入 tokens: 640.00(目标 640)
50
+ 生成总 tokens: 8192
51
+ 吞吐(生成tokens/秒): 2645.49
52
+ TTFT (V1 metrics): 0.0314 s
53
+ 解码吞吐 (V1 metrics): 168.86 tok/s
54
+
55
+ --- 批量大小 bs=32 ---
56
+ Adding requests: 100%|████████████████████████████████████���█████████████████████████████████████████████████████████████████████████████████████| 32/32 [00:00<00:00, 335.64it/s]
57
+ Processed prompts: 100%|███████████████████████████████████████████████████████████████| 32/32 [00:03<00:00, 9.64it/s, est. speed input: 6170.77 toks/s, output: 4936.59 toks/s]
58
+ 执行时间: 3.4167 s
59
+ 实际平均输入 tokens: 640.00(目标 640)
60
+ 生成总 tokens: 16384
61
+ 吞吐(生成tokens/秒): 4795.34
62
+ TTFT (V1 metrics): 0.0426 s
63
+ 解码吞吐 (V1 metrics): 154.14 tok/s
64
+
65
+ --- 批量大小 bs=64 ---
66
+ Adding requests: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 64/64 [00:00<00:00, 449.34it/s]
67
+ Processed prompts: 100%|██████████████████████████████████████████████████████████████| 64/64 [00:03<00:00, 16.67it/s, est. speed input: 10672.90 toks/s, output: 8538.28 toks/s]
68
+ 执行时间: 3.9839 s
69
+ 实际平均输入 tokens: 640.00(目标 640)
70
+ 生成总 tokens: 32768
71
+ 吞吐(生成tokens/秒): 8225.04
72
+ TTFT (V1 metrics): 0.0638 s
73
+ 解码吞吐 (V1 metrics): 133.10 tok/s
74
+
75
+ --- 批量大小 bs=128 ---
76
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 128/128 [00:00<00:00, 489.69it/s]
77
+ Processed prompts: 100%|███████████████████████████████████████████████████████████| 128/128 [00:04<00:00, 26.33it/s, est. speed input: 16849.84 toks/s, output: 13425.52 toks/s]
78
+ 执行时间: 5.1276 s
79
+ 实际平均输入 tokens: 640.00(目标 640)
80
+ 生成总 tokens: 65272
81
+ 吞吐(生成tokens/秒): 12729.46
82
+ TTFT (V1 metrics): 0.1208 s
83
+ 解码吞吐 (V1 metrics): 104.39 tok/s
84
+
85
+ --- 批量大小 bs=256 ---
86
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 256/256 [00:00<00:00, 351.03it/s]
87
+ Processed prompts: 100%|███████████████████████████████████████████████████████████| 256/256 [00:07<00:00, 35.75it/s, est. speed input: 22881.00 toks/s, output: 18304.75 toks/s]
88
+ 执行时间: 7.8977 s
89
+ 实际平均输入 tokens: 640.00(目标 640)
90
+ 生成总 tokens: 131072
91
+ 吞吐(生成tokens/秒): 16596.30
92
+ TTFT (V1 metrics): 0.4649 s
93
+ 解码吞吐 (V1 metrics): 69.79 tok/s
94
+
95
+ --- 批量大小 bs=512 ---
96
+ Adding requests: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 512/512 [00:00<00:00, 535.56it/s]
97
+ Processed prompts: 100%|███████████████████████████████████████████████████████████| 512/512 [00:12<00:00, 41.03it/s, est. speed input: 26258.02 toks/s, output: 21006.35 toks/s]
98
+ 执行时间: 13.4567 s
99
+ 实际平均输入 tokens: 640.00(目标 640)
100
+ 生成总 tokens: 262144
101
+ 吞吐(生成tokens/秒): 19480.62
102
+ TTFT (V1 metrics): 0.4882 s
103
+ 解码吞吐 (V1 metrics): 40.60 tok/s
104
+
105
+ --- 批量大小 bs=1024 ---
106
+ Adding requests: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1024/1024 [00:01<00:00, 553.17it/s]
107
+ Processed prompts: 100%|████████��████████████████████████████████████████████████| 1024/1024 [00:24<00:00, 41.44it/s, est. speed input: 26524.06 toks/s, output: 21219.22 toks/s]
108
+ 执行时间: 26.5997 s
109
+ 实际平均输入 tokens: 640.00(目标 640)
110
+ 生成总 tokens: 524288
111
+ 吞吐(生成tokens/秒): 19710.29
112
+ TTFT (V1 metrics): 0.9604 s
113
+ 解码吞吐 (V1 metrics): 20.56 tok/s
114
+
115
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
116
+ [rank0]:[W813 18:53:54.532255598 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
traverse_bs_util_std.log ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ WARNING: CPU IP/backtrace sampling not supported, disabling.
2
+ Try the 'nsys status --environment' command to learn more.
3
+
4
+ WARNING: CPU context switch tracing not supported, disabling.
5
+ Try the 'nsys status --environment' command to learn more.
6
+
7
+ INFO 08-13 19:02:19 [__init__.py:235] Automatically detected platform cuda.
8
+ CUDA_VISIBLE_DEVICES = 3
9
+ --- vLLM V1 基准测试(含 NVTX 标记)---
10
+ 模型: Qwen/Qwen2-1.5B
11
+ 批量大小: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
12
+ 场景: ['prefill640_decode512']
13
+ ------------------------------------------------------------
14
+ 加载分词器/模型中...
15
+ INFO 08-13 19:02:29 [config.py:1604] Using max model len 4096
16
+ INFO 08-13 19:02:29 [config.py:2434] Chunked prefill is enabled with max_num_batched_tokens=8192.
17
+ INFO 08-13 19:02:35 [__init__.py:235] Automatically detected platform cuda.
18
+ INFO 08-13 19:02:37 [core.py:572] Waiting for init message from front-end.
19
+ INFO 08-13 19:02:37 [core.py:71] Initializing a V1 LLM engine (v0.10.0) with config: model='Qwen/Qwen2-1.5B', speculative_config=None, tokenizer='Qwen/Qwen2-1.5B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen2-1.5B, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, pooler_config=None, compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output","vllm.mamba_mixer2"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
20
+ INFO 08-13 19:02:40 [parallel_state.py:1102] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
21
+ WARNING 08-13 19:02:40 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
22
+ INFO 08-13 19:02:40 [gpu_model_runner.py:1843] Starting to load model Qwen/Qwen2-1.5B...
23
+ INFO 08-13 19:02:40 [gpu_model_runner.py:1875] Loading model from scratch...
24
+ INFO 08-13 19:02:40 [cuda.py:290] Using Flash Attention backend on V1 engine.
25
+ INFO 08-13 19:02:40 [weight_utils.py:296] Using model weights format ['*.safetensors']
26
+ INFO 08-13 19:02:41 [weight_utils.py:349] No model.safetensors.index.json found in remote.
27
+
28
+
29
+
30
+
31
+ INFO 08-13 19:02:42 [default_loader.py:262] Loading weights took 0.75 seconds
32
+ INFO 08-13 19:02:42 [gpu_model_runner.py:1892] Model loading took 2.9105 GiB and 1.965894 seconds
33
+ INFO 08-13 19:02:48 [backends.py:530] Using cache directory: /home/cy/.cache/vllm/torch_compile_cache/40b61c71e9/rank_0_0/backbone for vLLM's torch.compile
34
+ INFO 08-13 19:02:48 [backends.py:541] Dynamo bytecode transform time: 6.08 s
35
+ INFO 08-13 19:02:53 [backends.py:194] Cache the graph for dynamic shape for later use
36
+ INFO 08-13 19:03:14 [backends.py:215] Compiling a graph for dynamic shape takes 25.33 s
37
+ INFO 08-13 19:03:21 [monitor.py:34] torch.compile takes 31.41 s in total
38
+ INFO 08-13 19:03:22 [gpu_worker.py:255] Available KV cache memory: 12.80 GiB
39
+ INFO 08-13 19:03:22 [kv_cache_utils.py:833] GPU KV cache size: 479,456 tokens
40
+ INFO 08-13 19:03:22 [kv_cache_utils.py:837] Maximum concurrency for 4,096 tokens per request: 117.05x
41
+
42
+ INFO 08-13 19:03:25 [gpu_model_runner.py:2485] Graph capturing finished in 2 secs, took 0.49 GiB
43
+ INFO 08-13 19:03:25 [core.py:193] init engine (profile, create kv cache, warmup model) took 42.37 seconds
44
+ 模型加载完成。
45
+
46
+ ===== 场景:prefill640_decode512 | prefill=640, decode=512 =====
47
+
48
+ --- 批量大小 bs=1 ---
49
+
50
+
51
+ 执行时间: 3.3360 s
52
+ 实际平均输入 tokens: 640.00(目标 640)
53
+ 生成总 tokens: 512
54
+ 吞吐(生成tokens/秒): 153.48
55
+ TTFT (V1 metrics): 0.0327 s
56
+ 解码吞吐 (V1 metrics): 154.93 tok/s
57
+
58
+ --- 批量大小 bs=2 ---
59
+
60
+
61
+ 执行时间: 3.7300 s
62
+ 实际平均输入 tokens: 640.00(目标 640)
63
+ 生成总 tokens: 1024
64
+ 吞吐(生成tokens/秒): 274.53
65
+ TTFT (V1 metrics): 0.0158 s
66
+ 解码吞吐 (V1 metrics): 137.84 tok/s
67
+
68
+ --- 批量大小 bs=4 ---
69
+
70
+
71
+ 执行时间: 3.6164 s
72
+ 实际平均输入 tokens: 640.00(目标 640)
73
+ 生成总 tokens: 2048
74
+ 吞吐(生成tokens/秒): 566.30
75
+ TTFT (V1 metrics): 0.0169 s
76
+ 解码吞吐 (V1 metrics): 142.63 tok/s
77
+
78
+ --- 批量大小 bs=8 ---
79
+
80
+
81
+ 执行时间: 3.7265 s
82
+ 实际平均输入 tokens: 640.00(目标 640)
83
+ 生成总 tokens: 4096
84
+ 吞吐(生成tokens/秒): 1099.15
85
+ TTFT (V1 metrics): 0.0219 s
86
+ 解码吞吐 (V1 metrics): 138.89 tok/s
87
+
88
+ --- 批量大小 bs=16 ---
89
+
90
+
91
+ 执行时间: 3.8919 s
92
+ 实际平均输入 tokens: 640.00(目标 640)
93
+ 生成总 tokens: 8192
94
+ 吞吐(生成tokens/秒): 2104.89
95
+ TTFT (V1 metrics): 0.0329 s
96
+ 解码吞吐 (V1 metrics): 133.82 tok/s
97
+
98
+ --- 批量大小 bs=32 ---
99
+
100
+
101
+ 执行时间: 4.0341 s
102
+ 实际平均输入 tokens: 640.00(目标 640)
103
+ 生成总 tokens: 16384
104
+ 吞吐(生成tokens/秒): 4061.41
105
+ TTFT (V1 metrics): 0.0461 s
106
+ 解码吞吐 (V1 metrics): 130.12 tok/s
107
+
108
+ --- 批量大小 bs=64 ---
109
+
110
+
111
+ 执行时间: 4.4199 s
112
+ 实际平均输入 tokens: 640.00(目标 640)
113
+ 生成总 tokens: 32768
114
+ 吞吐(生成tokens/秒): 7413.77
115
+ TTFT (V1 metrics): 0.0691 s
116
+ 解码吞吐 (V1 metrics): 120.00 tok/s
117
+
118
+ --- 批量大小 bs=128 ---
119
+
120
+
121
+ 执行时间: 6.2947 s
122
+ 实际平均输入 tokens: 640.00(目标 640)
123
+ 生成总 tokens: 65421
124
+ 吞吐(生成tokens/秒): 10393.02
125
+ TTFT (V1 metrics): 0.1218 s
126
+ 解码吞吐 (V1 metrics): 84.64 tok/s
127
+
128
+ --- 批量大小 bs=256 ---
129
+
130
+
131
+ 执行时间: 9.2625 s
132
+ 实际平均输入 tokens: 640.00(目标 640)
133
+ 生成总 tokens: 131072
134
+ 吞吐(生成tokens/秒): 14150.76
135
+ TTFT (V1 metrics): 0.4813 s
136
+ 解码吞吐 (V1 metrics): 59.00 tok/s
137
+
138
+ --- 批量大小 bs=512 ---
139
+
140
+
141
+ 执行时间: 14.1481 s
142
+ 实际平均输入 tokens: 640.00(目标 640)
143
+ 生成总 tokens: 262144
144
+ 吞吐(生成tokens/秒): 18528.59
145
+ TTFT (V1 metrics): 0.4908 s
146
+ 解码吞吐 (V1 metrics): 38.46 tok/s
147
+
148
+ --- 批量大小 bs=1024 ---
149
+
150
+
151
+ [rank0]:[W813 19:04:51.071947265 ProcessGroupNCCL.cpp:1479] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
152
+ 执行时间: 27.7908 s
153
+ 实际平均输入 tokens: 640.00(目标 640)
154
+ 生成总 tokens: 524288
155
+ 吞吐(生成tokens/秒): 18865.49
156
+ TTFT (V1 metrics): 0.9638 s
157
+ 解码吞吐 (V1 metrics): 19.72 tok/s
158
+
159
+ 完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。
160
+ GPU 3: General Metrics for NVIDIA AD10x (any frequency)
161
+ Generating '/tmp/nsys-report-e7ae.qdstrm'
162
+
163
+
164
+ [3/8] Executing 'nvtx_sum' stats report
165
+
166
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Style Range
167
+ -------- --------------- --------- ---------------- ---------------- -------------- -------------- ----------- ------- ----------------------------------------
168
+ 43.1 63,806,267,138 1 63,806,267,138.0 63,806,267,138.0 63,806,267,138 63,806,267,138 0.0 PushPop :LLM_init
169
+ 18.8 27,790,304,411 1 27,790,304,411.0 27,790,304,411.0 27,790,304,411 27,790,304,411 0.0 PushPop :generate [prefill640_decode512] bs=1024
170
+ 9.6 14,147,468,287 1 14,147,468,287.0 14,147,468,287.0 14,147,468,287 14,147,468,287 0.0 PushPop :generate [prefill640_decode512] bs=512
171
+ 6.3 9,262,392,366 1 9,262,392,366.0 9,262,392,366.0 9,262,392,366 9,262,392,366 0.0 PushPop :generate [prefill640_decode512] bs=256
172
+ 4.3 6,294,556,076 1 6,294,556,076.0 6,294,556,076.0 6,294,556,076 6,294,556,076 0.0 PushPop :generate [prefill640_decode512] bs=128
173
+ 3.0 4,419,734,921 1 4,419,734,921.0 4,419,734,921.0 4,419,734,921 4,419,734,921 0.0 PushPop :generate [prefill640_decode512] bs=64
174
+ 2.7 4,033,922,062 1 4,033,922,062.0 4,033,922,062.0 4,033,922,062 4,033,922,062 0.0 PushPop :generate [prefill640_decode512] bs=32
175
+ 2.6 3,891,757,396 1 3,891,757,396.0 3,891,757,396.0 3,891,757,396 3,891,757,396 0.0 PushPop :generate [prefill640_decode512] bs=16
176
+ 2.5 3,729,817,085 1 3,729,817,085.0 3,729,817,085.0 3,729,817,085 3,729,817,085 0.0 PushPop :generate [prefill640_decode512] bs=2
177
+ 2.5 3,726,348,651 1 3,726,348,651.0 3,726,348,651.0 3,726,348,651 3,726,348,651 0.0 PushPop :generate [prefill640_decode512] bs=8
178
+ 2.4 3,616,307,172 1 3,616,307,172.0 3,616,307,172.0 3,616,307,172 3,616,307,172 0.0 PushPop :generate [prefill640_decode512] bs=4
179
+ 2.3 3,335,871,818 1 3,335,871,818.0 3,335,871,818.0 3,335,871,818 3,335,871,818 0.0 PushPop :generate [prefill640_decode512] bs=1
180
+ 0.0 88,217 2 44,108.5 44,108.5 42,206 46,011 2,690.5 PushPop CCCL:cub::DeviceSegmentedRadixSort
181
+
182
+ [4/8] Executing 'osrt_sum' stats report
183
+
184
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
185
+ -------- ----------------- --------- --------------- ---------------- --------- --------------- --------------- ----------------------
186
+ 30.2 1,744,937,975,193 54,559 31,982,587.2 36,252.0 1,000 131,944,469,826 1,264,387,583.2 pthread_cond_timedwait
187
+ 24.4 1,409,850,293,508 90,937 15,503,593.6 10,063,900.0 1,000 89,656,403,165 542,281,129.3 epoll_wait
188
+ 24.3 1,405,293,461,182 2,062 681,519,622.3 169,137.0 1,777 131,946,128,306 9,309,452,118.6 pthread_cond_wait
189
+ 8.2 470,649,973,426 67 7,024,626,469.0 10,000,075,590.0 8,879 10,000,128,605 4,563,470,551.8 sem_timedwait
190
+ 7.6 440,451,323,826 42,747 10,303,678.0 1,400.0 1,000 13,214,170,856 151,629,150.0 poll
191
+ 2.6 148,761,603,454 44,756 3,323,836.0 2,218.0 1,000 130,016,250,506 617,021,890.8 read
192
+ 2.4 141,326,431,455 11,558 12,227,585.3 7,211,303.0 21,424 658,160,680 14,124,156.6 sem_wait
193
+ 0.1 5,907,485,407 725 8,148,255.7 1,042.0 1,000 442,887,066 44,916,303.4 waitpid
194
+ 0.0 1,252,683,205 505,872 2,476.3 1,440.0 1,006 94,581,342 133,009.9 munmap
195
+ 0.0 907,262,068 353 2,570,147.5 1,227,045.0 1,197 26,263,430 3,108,782.5 pthread_rwlock_wrlock
196
+ 0.0 708,053,362 172 4,116,589.3 615,554.5 3,112 29,565,907 7,196,056.2 pthread_join
197
+ 0.0 327,596,329 10,080 32,499.6 10,962.5 1,000 29,220,721 379,953.4 ioctl
198
+ 0.0 261,805,946 495 528,900.9 2,972.0 1,077 19,958,090 3,072,110.4 fopen
199
+ 0.0 160,631,905 36,713 4,375.3 3,385.0 1,000 1,692,951 12,061.0 mmap64
200
+ 0.0 150,219,603 6,263 23,985.2 8,256.0 1,010 2,585,639 128,799.4 pthread_mutex_lock
201
+ 0.0 126,650,615 25 5,066,024.6 5,065,286.0 5,053,603 5,077,238 7,361.7 nanosleep
202
+ 0.0 99,487,929 31,988 3,110.2 2,516.0 1,000 75,099 2,795.8 open64
203
+ 0.0 90,344,763 9,083 9,946.6 4,712.0 1,148 2,648,924 36,587.2 recv
204
+ 0.0 84,823,390 9,082 9,339.7 5,735.5 1,468 89,916 8,146.6 send
205
+ 0.0 78,685,390 43,321 1,816.3 1,662.0 1,000 354,334 2,967.5 pthread_cond_signal
206
+ 0.0 69,793,423 5,751 12,135.9 2,001.0 1,035 19,751,200 420,727.9 open
207
+ 0.0 66,948,530 15,954 4,196.3 2,557.0 1,010 575,245 10,386.1 write
208
+ 0.0 50,999,387 10 5,099,938.7 19,477.5 9,808 50,802,125 16,058,116.0 connect
209
+ 0.0 18,140,344 11,019 1,646.3 1,411.0 1,000 24,064 709.4 epoll_ctl
210
+ 0.0 18,080,692 280 64,573.9 51,655.5 16,288 578,620 60,626.8 pthread_create
211
+ 0.0 9,776,017 147 66,503.5 68,572.0 55,588 85,413 5,091.0 sleep
212
+ 0.0 7,076,518 18 393,139.9 383,949.5 262,109 584,262 85,377.7 posix_spawn
213
+ 0.0 6,601,831 899 7,343.5 5,539.0 1,010 84,493 9,244.8 fgets
214
+ 0.0 5,962,720 22 271,032.7 169,725.0 16,478 675,901 237,522.4 pthread_rwlock_rdlock
215
+ 0.0 3,506,178 342 10,252.0 2,354.5 1,006 207,881 30,027.2 pthread_cond_broadcast
216
+ 0.0 3,013,330 1,417 2,126.6 1,083.0 1,008 110,583 7,412.5 fclose
217
+ 0.0 2,975,781 1,319 2,256.1 1,598.0 1,000 22,362 1,984.6 stat
218
+ 0.0 2,885,075 692 4,169.2 4,026.5 1,238 45,085 4,258.3 fopen64
219
+ 0.0 2,145,928 345 6,220.1 3,291.0 1,000 51,101 8,060.8 fread
220
+ 0.0 1,950,425 65 30,006.5 3,106.0 1,201 261,266 71,483.9 futex
221
+ 0.0 1,877,146 336 5,586.7 4,406.0 1,002 83,435 6,148.2 mmap
222
+ 0.0 1,741,859 102 17,077.0 4,155.5 1,030 432,266 67,240.1 fwrite
223
+ 0.0 1,680,806 1,203 1,397.2 1,251.0 1,000 8,322 493.0 fstat
224
+ 0.0 1,075,803 1 1,075,803.0 1,075,803.0 1,075,803 1,075,803 0.0 fork
225
+ 0.0 721,566 99 7,288.5 6,229.0 2,560 17,976 3,711.4 pipe2
226
+ 0.0 595,674 19 31,351.3 5,092.0 4,006 383,239 86,162.0 putc
227
+ 0.0 248,534 41 6,061.8 4,850.0 1,628 18,035 4,032.6 socket
228
+ 0.0 183,085 115 1,592.0 1,513.0 1,001 2,876 429.3 sigaction
229
+ 0.0 167,627 16 10,476.7 2,835.5 1,086 55,140 16,437.5 bind
230
+ 0.0 115,447 8 14,430.9 6,418.0 3,542 41,638 15,571.8 fputs
231
+ 0.0 93,879 16 5,867.4 5,035.0 1,640 14,112 3,769.0 lstat
232
+ 0.0 60,214 6 10,035.7 10,003.0 9,288 10,680 531.1 getc
233
+ 0.0 49,588 27 1,836.6 1,677.0 1,019 2,939 596.6 dup2
234
+ 0.0 47,084 37 1,272.5 1,112.0 1,002 3,284 431.2 fcntl
235
+ 0.0 37,610 24 1,567.1 1,488.5 1,021 2,512 357.1 signal
236
+ 0.0 35,001 5 7,000.2 7,792.0 3,662 10,502 2,759.1 accept4
237
+ 0.0 31,541 9 3,504.6 4,424.0 1,078 6,400 2,269.5 fflush
238
+ 0.0 15,432 4 3,858.0 3,690.5 2,526 5,525 1,477.8 flock
239
+ 0.0 15,311 11 1,391.9 1,301.0 1,125 1,918 254.0 listen
240
+ 0.0 14,040 8 1,755.0 1,552.0 1,310 3,235 642.4 pread
241
+ 0.0 12,841 3 4,280.3 4,425.0 3,921 4,495 313.2 fputs_unlocked
242
+ 0.0 12,488 5 2,497.6 2,496.0 2,206 2,947 282.0 mprotect
243
+ 0.0 10,666 1 10,666.0 10,666.0 10,666 10,666 0.0 dup
244
+ 0.0 6,561 3 2,187.0 1,727.0 1,636 3,198 876.7 flockfile
245
+ 0.0 6,208 1 6,208.0 6,208.0 6,208 6,208 0.0 kill
246
+ 0.0 3,788 2 1,894.0 1,894.0 1,355 2,433 762.3 openat64
247
+ 0.0 2,317 2 1,158.5 1,158.5 1,025 1,292 188.8 pthread_mutex_trylock
248
+
249
+ [5/8] Executing 'cuda_api_sum' stats report
250
+
251
+ Time (%) Total Time (ns) Num Calls Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
252
+ -------- --------------- --------- ----------- ----------- -------- ----------- ----------- ------------------------------------------
253
+ 54.0 18,079,332,597 62,830 287,750.0 8,213.0 2,790 112,775,333 1,983,727.7 cudaMemcpyAsync
254
+ 21.3 7,151,454,905 1,151,462 6,210.8 4,908.0 775 60,344,468 100,074.1 cudaLaunchKernel
255
+ 13.9 4,659,698,239 3,031 1,537,346.8 36,322.0 1,646 137,811,508 5,892,401.8 cudaDeviceSynchronize
256
+ 5.7 1,902,754,251 154,454 12,319.2 10,944.0 7,132 6,919,461 39,439.6 cudaGraphLaunch_v10000
257
+ 2.4 817,695,167 151,595 5,393.9 4,998.0 606 8,288,395 63,229.0 cuLaunchKernel
258
+ 0.7 225,761,461 1,943 116,192.2 74,685.0 38,766 1,503,454 195,876.4 cudaGraphInstantiateWithFlags_v11040
259
+ 0.4 145,677,030 27,893 5,222.7 5,401.0 170 3,577,542 23,514.0 cudaMemsetAsync
260
+ 0.4 123,078,496 156,852 784.7 749.0 293 28,187 217.2 cudaStreamIsCapturing_v10000
261
+ 0.2 80,818,259 41,653 1,940.3 1,901.0 1,681 230,930 1,185.3 cudaEventRecord
262
+ 0.2 70,123,080 11,007 6,370.8 2,991.0 1,576 11,464,127 117,490.1 cudaStreamSynchronize
263
+ 0.2 54,985,787 222 247,683.7 127,094.5 70,289 2,353,583 356,501.3 cudaFree
264
+ 0.1 39,808,657 349 114,064.9 107,734.0 9,252 1,028,648 56,280.0 cudaMalloc
265
+ 0.1 33,850,909 41,671 812.3 782.0 275 186,043 928.6 cudaEventCreateWithFlags
266
+ 0.1 25,030,442 10 2,503,044.2 2,591,457.0 57,483 4,465,642 1,429,904.6 cuLibraryLoadData
267
+ 0.1 20,138,251 281 71,666.4 73,047.0 25,875 416,759 45,705.2 cuModuleLoadData
268
+ 0.1 18,115,827 41,653 434.9 403.0 338 226,830 1,887.5 cudaEventDestroy
269
+ 0.1 17,924,935 16,808 1,066.5 493.0 261 6,344,359 50,228.8 cuKernelGetFunction
270
+ 0.0 9,263,417 18,895 490.3 467.0 322 6,477 105.4 cudaStreamGetCaptureInfo_v2_v11030
271
+ 0.0 7,974,210 1,943 4,104.1 4,022.0 3,214 9,676 584.5 cudaStreamBeginCapture_v10000
272
+ 0.0 7,518,878 1,943 3,869.7 3,828.0 2,357 7,833 536.7 cudaGraphDestroy_v10000
273
+ 0.0 3,416,827 128 26,694.0 2,299.0 1,471 1,153,703 140,616.2 cudaStreamCreateWithPriority
274
+ 0.0 2,744,082 1,943 1,412.3 1,389.0 1,050 7,178 196.6 cudaStreamEndCapture_v10000
275
+ 0.0 1,570,575 1,943 808.3 739.0 614 2,547 251.2 cudaGraphGetNodes_v10000
276
+ 0.0 1,322,243 15 88,149.5 6,436.0 3,579 1,170,830 300,044.6 cudaHostAlloc
277
+ 0.0 280,352 8 35,044.0 26,955.5 12,673 101,212 28,421.5 cudaMemGetInfo
278
+ 0.0 138,906 810 171.5 140.0 79 1,705 118.0 cuGetProcAddress_v2
279
+ 0.0 23,009 16 1,438.1 808.5 451 5,531 1,508.2 cuLibraryGetKernel
280
+ 0.0 8,159 14 582.8 544.5 324 990 193.4 cudaThreadExchangeStreamCaptureMode_v10010
281
+ 0.0 4,031 1 4,031.0 4,031.0 4,031 4,031 0.0 cudaStreamWaitEvent
282
+ 0.0 3,969 3 1,323.0 1,051.0 1,031 1,887 488.5 cuInit
283
+ 0.0 3,693 4 923.3 916.5 75 1,785 960.8 cuModuleGetLoadingMode
284
+ 0.0 1,064 2 532.0 532.0 356 708 248.9 cudaGetDriverEntryPoint_v11030
285
+
286
+ [6/8] Executing 'cuda_gpu_kern_sum' stats report
287
+
288
+ Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name
289
+ -------- --------------- --------- ----------- ----------- --------- --------- ----------- ----------------------------------------------------------------------------------------------------
290
+ 30.6 11,421,290,038 118,048 96,751.2 42,337.0 12,320 576,069 124,175.4 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
291
+ 20.4 7,605,306,813 28,807 264,009.0 265,858.0 32,961 765,542 115,223.0 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
292
+ 8.8 3,277,208,753 47,634 68,799.8 77,473.0 800 81,121 21,559.5 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<int>, std::array<cha…
293
+ 5.6 2,083,505,861 1,271 1,639,265.0 1,387,852.0 39,745 4,515,436 1,121,994.0 ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
294
+ 5.1 1,891,072,505 20,210 93,571.1 20,224.0 1,055 481,762 169,672.7 triton_poi_fused_mul_silu_1
295
+ 4.3 1,624,497,751 101,584 15,991.7 8,544.0 6,367 81,025 11,832.8 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
296
+ 3.3 1,229,556,187 9,203 133,603.8 42,977.0 7,648 557,317 169,265.9 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x64_32x6_tn_align8>(T1::Param…
297
+ 2.5 940,467,368 5,865 160,352.5 13,312.0 1,984 1,008,298 288,283.2 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
298
+ 2.2 808,325,696 2,044 395,462.7 496,644.0 10,592 510,980 192,958.7 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x2_tn_align8>(T1::Par…
299
+ 1.9 720,826,362 5,867 122,861.1 9,824.0 5,120 714,183 210,401.9 void at::native::reduce_kernel<(int)512, (int)1, at::native::ReduceOp<float, at::native::ArgMaxOps<…
300
+ 1.8 666,198,895 287,392 2,318.1 1,920.0 1,536 6,368 968.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
301
+ 1.6 609,901,106 6,048 100,843.4 32,161.0 6,912 3,159,614 305,220.1 void flash::flash_fwd_splitkv_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (int)4, (…
302
+ 1.5 576,753,139 13,496 42,735.1 42,624.0 26,016 102,081 3,914.6 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
303
+ 1.3 487,869,104 13,020 37,470.7 37,440.0 36,640 42,816 321.1 ampere_bf16_s1688gemm_bf16_64x64_sliced1x4_ldg8_f2f_tn
304
+ 1.2 431,150,928 22,086 19,521.5 3,489.0 1,344 75,104 27,792.8 triton_poi_fused_cat_3
305
+ 0.9 347,479,864 341 1,019,002.5 598,630.0 373,475 2,788,687 741,262.9 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_tn
306
+ 0.8 316,815,668 1,904 166,394.8 154,193.5 40,352 1,291,207 149,498.4 ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
307
+ 0.7 264,631,581 521 507,930.1 507,844.0 506,244 519,973 763.9 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_16x16_128x1_tn_align8>(T1::Par…
308
+ 0.7 258,705,730 610 424,107.8 487,972.0 6,976 500,003 159,920.1 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, __nv_bfloat16, __n…
309
+ 0.7 248,913,221 164,164 1,516.2 1,280.0 1,023 13,249 475.7 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
310
+ 0.6 206,205,114 57,848 3,564.6 3,584.0 3,295 3,936 112.4 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
311
+ 0.5 203,900,076 143,696 1,419.0 1,344.0 1,183 2,208 221.7 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
312
+ 0.5 189,670,922 16,968 11,178.2 12,256.0 1,536 111,105 5,147.2 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_2
313
+ 0.5 173,673,906 22,086 7,863.5 2,368.0 832 25,696 9,575.0 triton_poi_fused_view_5
314
+ 0.5 170,442,702 61,516 2,770.7 2,432.0 1,247 17,088 1,351.4 void vllm::merge_attn_states_kernel<__nv_bfloat16, (unsigned int)128>(T1 *, float *, const T1 *, co…
315
+ 0.3 95,480,145 22,086 4,323.1 1,376.0 1,215 15,104 5,013.1 triton_poi_fused_cat_4
316
+ 0.2 88,249,892 16,968 5,201.0 5,473.0 1,535 80,096 3,526.8 triton_red_fused__to_copy_add_mean_mul_pow_rsqrt_0
317
+ 0.2 58,167,224 15,232 3,818.8 3,840.0 3,711 4,032 31.4 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
318
+ 0.1 45,137,635 14,420 3,130.2 3,167.0 3,008 3,457 59.7 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
319
+ 0.1 43,394,155 14,084 3,081.1 3,072.0 3,008 3,200 19.7 void flash::flash_fwd_splitkv_combine_kernel<Flash_fwd_kernel_traits<(int)128, (int)64, (int)128, (…
320
+ 0.1 39,138,907 8 4,892,363.4 4,849,279.0 4,795,198 5,088,513 110,842.2 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
321
+ 0.1 22,694,961 784 28,947.7 12,575.5 11,744 62,528 20,606.8 ampere_bf16_s16816gemm_bf16_64x64_ldg8_f2f_stages_64x5_tn
322
+ 0.1 20,952,447 5,999 3,492.7 3,104.0 2,751 7,392 961.1 void at::native::index_elementwise_kernel<(int)128, (int)4, void at::native::gpu_index_kernel<void …
323
+ 0.1 20,383,074 4 5,095,768.5 5,085,968.5 4,915,359 5,295,778 195,206.9 void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel<at_cuda_detail::cub::DeviceRadixSortPolicy…
324
+ 0.0 15,859,055 818 19,387.6 3,136.0 1,600 74,881 27,973.6 triton_poi_fused_cat_1
325
+ 0.0 14,288,503 28 510,303.7 511,683.0 469,923 513,059 7,932.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<signed char>, std::a…
326
+ 0.0 9,813,089 4 2,453,272.3 2,468,672.5 2,391,504 2,484,240 42,132.0 void at::native::<unnamed>::cunn_SoftMaxForward<(int)4, float, float, float, at::native::<unnamed>:…
327
+ 0.0 9,653,448 448 21,547.9 21,345.0 21,120 24,928 817.3 ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
328
+ 0.0 9,408,780 5,863 1,604.8 1,376.0 1,120 2,752 455.9 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
329
+ 0.0 8,489,112 224 37,897.8 37,856.0 36,545 39,328 536.2 void cutlass::Kernel2<cutlass_80_tensorop_bf16_s16816gemm_relu_bf16_64x128_32x6_tn_align8>(T1::Para…
330
+ 0.0 8,311,916 28 296,854.1 294,593.5 293,505 332,482 7,194.4 ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
331
+ 0.0 7,845,574 9,023 869.5 864.0 767 1,281 77.2 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<long>, std::array<ch…
332
+ 0.0 7,775,377 2 3,887,688.5 3,887,688.5 3,705,239 4,070,138 258,022.6 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
333
+ 0.0 7,471,240 5,865 1,273.9 1,120.0 991 2,080 282.3 void at::native::unrolled_elementwise_kernel<at::native::direct_copy_kernel_cuda(at::TensorIterator…
334
+ 0.0 7,214,254 336 21,471.0 21,408.0 21,056 22,625 299.8 ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
335
+ 0.0 6,324,707 818 7,731.9 2,368.0 863 24,832 9,334.7 triton_poi_fused_view_3
336
+ 0.0 6,169,925 476 12,962.0 12,864.0 11,776 14,304 602.5 ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
337
+ 0.0 5,903,909 4 1,475,977.3 1,475,449.0 1,472,585 1,480,426 3,980.7 void at::native::vectorized_elementwise_kernel<(int)4, at::native::<unnamed>::masked_fill_kernel(at…
338
+ 0.0 5,501,036 5,863 938.3 928.0 895 1,312 71.9 void at::native::unrolled_elementwise_kernel<at::native::CUDAFunctorOnSelf_add<int>, std::array<cha…
339
+ 0.0 4,936,334 28 176,297.6 176,817.5 174,209 178,978 1,446.6 ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_relu_f2f_tn
340
+ 0.0 4,754,998 5,432 875.4 864.0 800 1,217 36.3 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<int>, std::array<char *, (unsi…
341
+ 0.0 3,995,930 2 1,997,965.0 1,997,965.0 1,996,108 1,999,822 2,626.2 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor<float, float, floa…
342
+ 0.0 3,598,526 56 64,259.4 64,320.5 63,104 65,313 559.8 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_64x1_tn_align8>(T1::Para…
343
+ 0.0 3,541,818 818 4,329.9 1,376.0 1,215 14,815 5,022.4 triton_poi_fused_cat_2
344
+ 0.0 3,434,102 4 858,525.5 858,069.5 855,685 862,278 2,939.8 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
345
+ 0.0 3,192,725 2 1,596,362.5 1,596,362.5 1,563,050 1,629,675 47,111.0 void at::native::tensor_kernel_scan_innermost_dim<float, std::plus<float>>(T1 *, const T1 *, unsign…
346
+ 0.0 2,897,906 1,512 1,916.6 1,824.0 1,312 2,976 438.1 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
347
+ 0.0 2,593,403 606 4,279.5 4,384.0 1,984 35,904 1,445.1 triton_red_fused__to_copy_add_embedding_mean_mul_pow_rsqrt_0
348
+ 0.0 2,582,288 2 1,291,144.0 1,291,144.0 1,290,536 1,291,752 859.8 at::native::<unnamed>::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider<unsign…
349
+ 0.0 2,580,625 2 1,290,312.5 1,290,312.5 1,288,393 1,292,232 2,714.6 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
350
+ 0.0 2,421,451 112 21,620.1 21,552.0 9,376 34,400 12,006.5 void cutlass::Kernel2<cutlass_80_wmma_tensorop_bf16_s161616gemm_bf16_32x32_128x2_tn_align8>(T1::Par…
351
+ 0.0 1,377,256 2 688,628.0 688,628.0 682,820 694,436 8,213.8 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
352
+ 0.0 1,128,024 1,252 901.0 896.0 800 1,280 45.2 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<int>, std::array<cha…
353
+ 0.0 957,446 28 34,194.5 34,768.5 17,983 35,232 3,184.5 std::enable_if<!T7, void>::type internal::gemvx::kernel<int, int, __nv_bfloat16, float, float, floa…
354
+ 0.0 670,018 731 916.6 928.0 863 1,024 22.0 void at::native::unrolled_elementwise_kernel<at::native::FillFunctor<long>, std::array<char *, (uns…
355
+ 0.0 296,898 168 1,767.3 1,760.0 1,536 2,080 121.8 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
356
+ 0.0 157,249 1 157,249.0 157,249.0 157,249 157,249 0.0 void at::native::<unnamed>::CatArrayBatchedCopy_aligned16_contig<at::native::<unnamed>::OpaqueType<…
357
+ 0.0 90,491 86 1,052.2 927.5 895 11,488 1,139.9 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<c10::BFloat16>, std:…
358
+ 0.0 78,785 1 78,785.0 78,785.0 78,785 78,785 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::bfloat16_copy_kernel_cuda(at::Te…
359
+ 0.0 43,232 1 43,232.0 43,232.0 43,232 43,232 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::sin_kernel_cuda(at::TensorIterat…
360
+ 0.0 36,737 28 1,312.0 1,312.0 1,280 1,344 17.4 void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
361
+ 0.0 26,432 1 26,432.0 26,432.0 26,432 26,432 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::cos_kernel_cuda(at::TensorIterat…
362
+ 0.0 19,520 1 19,520.0 19,520.0 19,520 19,520 0.0 void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast<at::n…
363
+ 0.0 11,713 11 1,064.8 864.0 800 1,536 305.4 void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor<float>, std::array<c…
364
+ 0.0 10,624 2 5,312.0 5,312.0 5,024 5,600 407.3 void at::native::_scatter_gather_elementwise_kernel<(int)128, (int)8, void at::native::_cuda_scatte…
365
+ 0.0 8,639 2 4,319.5 4,319.5 4,128 4,511 270.8 void at::native::<unnamed>::distribution_elementwise_grid_stride_kernel<float, (int)4, void at::nat…
366
+ 0.0 3,616 2 1,808.0 1,808.0 1,600 2,016 294.2 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl_nocast<at::n…
367
+ 0.0 3,489 2 1,744.5 1,744.5 1,696 1,793 68.6 void at::native::vectorized_elementwise_kernel<(int)2, at::native::CUDAFunctorOnOther_add<long>, st…
368
+ 0.0 3,103 2 1,551.5 1,551.5 1,503 1,600 68.6 void at::native::vectorized_elementwise_kernel<(int)2, at::native::<unnamed>::where_kernel_impl(at:…
369
+ 0.0 2,976 2 1,488.0 1,488.0 1,376 1,600 158.4 void at::native::vectorized_elementwise_kernel<(int)4, void at::native::compare_scalar_kernel<float…
370
+ 0.0 2,975 2 1,487.5 1,487.5 991 1,984 702.2 void <unnamed>::elementwise_kernel_with_index<int, at::native::arange_cuda_out(const c10::Scalar &,…
371
+ 0.0 2,944 2 1,472.0 1,472.0 1,344 1,600 181.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::CUDAFunctorOnOther_add<float>, s…
372
+ 0.0 2,400 1 2,400.0 2,400.0 2,400 2,400 0.0 void at::native::elementwise_kernel<(int)128, (int)4, void at::native::gpu_kernel_impl<at::native::…
373
+ 0.0 1,185 1 1,185.0 1,185.0 1,185 1,185 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
374
+ 0.0 1,025 1 1,025.0 1,025.0 1,025 1,025 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor<float, float, floa…
375
+ 0.0 1,025 1 1,025.0 1,025.0 1,025 1,025 0.0 void at::native::vectorized_elementwise_kernel<(int)4, at::native::BUnaryFunctor<float, float, floa…
376
+ 0.0 896 1 896.0 896.0 896 896 0.0 void at::native::vectorized_elementwise_kernel<(int)2, at::native::FillFunctor<double>, std::array<…
377
+
378
+ [7/8] Executing 'cuda_gpu_mem_time_sum' stats report
379
+
380
+ Time (%) Total Time (ns) Count Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Operation
381
+ -------- --------------- ------ -------- -------- -------- ----------- ----------- ------------------------------
382
+ 93.8 627,226,731 42,463 14,771.1 352.0 320 112,333,155 587,539.7 [CUDA memcpy Host-to-Device]
383
+ 2.8 18,735,373 14,448 1,296.7 928.0 895 1,362,505 22,615.1 [CUDA memcpy Device-to-Device]
384
+ 2.4 16,204,705 24,393 664.3 768.0 320 8,224 282.8 [CUDA memset]
385
+ 1.0 6,719,471 5,919 1,135.2 1,120.0 863 1,920 102.9 [CUDA memcpy Device-to-Host]
386
+
387
+ [8/8] Executing 'cuda_gpu_mem_size_sum' stats report
388
+
389
+ Total (MB) Count Avg (MB) Med (MB) Min (MB) Max (MB) StdDev (MB) Operation
390
+ ---------- ------ -------- -------- -------- -------- ----------- ------------------------------
391
+ 4,194.770 42,463 0.099 0.000 0.000 466.747 2.582 [CUDA memcpy Host-to-Device]
392
+ 2,533.618 14,448 0.175 0.003 0.000 622.330 10.354 [CUDA memcpy Device-to-Device]
393
+ 17.613 24,393 0.001 0.001 0.000 0.006 0.000 [CUDA memset]
394
+ 4.192 5,919 0.001 0.000 0.000 0.004 0.001 [CUDA memcpy Device-to-Host]
395
+
396
+ Generated:
397
+ /data/cy/kv_cache_vs_util/std_traverse_bs/traverse_bs_util_std.nsys-rep
398
+ /data/cy/kv_cache_vs_util/std_traverse_bs/traverse_bs_util_std.sqlite
traverse_bs_util_std.nsys-rep ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bdc4881551bfa6904baceba8d047d0fc2738308fbadb2d1c3d42a14840041bd6
3
+ size 134872655
traverse_bs_util_std.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import time
3
+ import statistics
4
+ from typing import List, Tuple, Dict
5
+
6
+ import torch
7
+ import torch.cuda.nvtx as nvtx
8
+
9
+ from vllm import LLM, SamplingParams
10
+ from transformers import AutoTokenizer
11
+
12
# ========= Force vLLM V1 engine =========
os.environ.setdefault("VLLM_USE_V1", "1")
os.environ.setdefault("VLLM_WORKER_MULTIPROC_METHOD", "spawn")

# Optional: enable V1 metrics logging
os.environ.setdefault("VLLM_LOGGING_LEVEL", "INFO")

# ========= Try to import V1 metrics types (compatible across vLLM versions) =========
try:
    from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector  # type: ignore
except Exception:
    # Fallback dummies so isinstance checks below fail gracefully; the
    # class-name string comparison in _collect_hist_sum_count still works.
    Counter = Gauge = Histogram = Vector = type("X", (), {})  # dummy

# ========= Benchmark configuration =========
MODEL_NAME = "Qwen/Qwen2-1.5B"
DTYPE = "bfloat16"
TP = 1                     # tensor-parallel size
GPU_MEM_UTIL = 0.90        # fraction of GPU memory vLLM may reserve
TRUST_REMOTE_CODE = True

# Scenarios: prefill = input tokens, decode = output tokens
SCENARIOS = [
    # {"name": "prefill640_decode1", "prompt_tokens": 640, "max_new_tokens": 1},
    # {"name": "prefill1_decode512", "prompt_tokens": 1, "max_new_tokens": 512},
    {"name": "prefill640_decode512", "prompt_tokens": 640, "max_new_tokens": 512},
]

BATCH_SIZES = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]

SEED = 1234
TEMPERATURE = 0.0   # greedy sampling
TOP_P = 1.0
WARMUP_PER_BS = 1  # one warmup pass per batch size (warmup code is currently commented out in main)
45
+
46
# ========= Build a prompt with an exact token count =========
def build_exact_token_prompt(tokenizer, target_len: int) -> str:
    """Return a text prompt that tokenizes to exactly *target_len* tokens.

    Strategy: binary-search a character cut point in a long filler text so
    that the prefix encodes to target_len tokens (with add_special_tokens
    disabled). If no exact cut exists, fall back to truncating/padding the
    token id list and decoding it. The final assert re-encodes the decoded
    prompt to verify the round trip preserved the exact length.

    NOTE(review): token count is not strictly monotonic in character length
    for every tokenizer, so the binary search is heuristic; the fallback
    branch plus the final assert are what actually guarantee correctness.
    NOTE(review): if target_len <= 1 and encoding "A" yielded no ids, the
    code falls through to the general path — presumably unreachable for
    real tokenizers, but unverified here.
    """
    if target_len <= 1:
        # Minimal prompt: a single simple token (avoid an empty string,
        # which would produce 0 tokens).
        ids = tokenizer("A", add_special_tokens=False)["input_ids"]
        if len(ids) >= 1:
            return tokenizer.decode(ids[:1], skip_special_tokens=True, clean_up_tokenization_spaces=False)

    base_text = (
        "You are a helpful assistant. "
        "Please analyze the following input and respond succinctly. "
    )
    chunk = " ".join(["data"] * 100) + ". "
    text = base_text + chunk * 200  # filler text long enough for any target_len used here

    # Binary search over the character cut point.
    lo, hi = 0, len(text)
    target_ids = None
    while lo <= hi:
        mid = (lo + hi) // 2
        ids = tokenizer(text[:mid], add_special_tokens=False)["input_ids"]
        if len(ids) == target_len:
            target_ids = ids
            break
        if len(ids) < target_len:
            lo = mid + 1
        else:
            hi = mid - 1

    if target_ids is None:
        # No exact cut found: truncate if too long, otherwise append filler
        # tokens (re-encoding each time) until we reach the target length.
        ids = tokenizer(text[:lo], add_special_tokens=False)["input_ids"]
        if len(ids) > target_len:
            target_ids = ids[:target_len]
        else:
            filler = " data"
            while len(ids) < target_len:
                ids = tokenizer(tokenizer.decode(ids) + filler, add_special_tokens=False)["input_ids"]
            target_ids = ids[:target_len]

    prompt = tokenizer.decode(target_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    # Assert the exact token length survives the decode/encode round trip.
    assert len(tokenizer(prompt, add_special_tokens=False)["input_ids"]) == target_len
    return prompt
88
+
89
# ========= V1 metrics extraction helpers =========
# Prometheus-style metric names exposed by the vLLM V1 engine.
TTFT_METRIC_NAME = "vllm:time_to_first_token_seconds"
TPOT_METRIC_NAME = "vllm:time_per_output_token_seconds"  # per-output-token latency
92
+
93
+ def _iter_children_of_vector(vec_obj):
94
+ for attr in ("children", "metrics", "series", "values", "samples", "items"):
95
+ if hasattr(vec_obj, attr):
96
+ val = getattr(vec_obj, attr)
97
+ if isinstance(val, dict):
98
+ for v in val.values():
99
+ yield v
100
+ else:
101
+ try:
102
+ for v in val:
103
+ yield v
104
+ except TypeError:
105
+ pass
106
+
107
def _collect_hist_sum_count(metrics, metric_name: str):
    """Accumulate the histogram `sum` and `count` for a named metric.

    Handles both plain Histogram metrics and Vector metrics whose children
    are Histograms. The class-name string comparison is a fallback for
    versions where the imported metric types are dummy placeholders.
    """
    acc_sum = 0.0
    acc_count = 0.0

    def _looks_like(obj, cls, cls_name):
        # isinstance for real types; class-name match for dummy fallbacks.
        return isinstance(obj, cls) or obj.__class__.__name__ == cls_name

    for metric in metrics:
        if getattr(metric, "name", None) != metric_name:
            continue
        # Plain Histogram metric.
        if _looks_like(metric, Histogram, "Histogram"):
            acc_sum += float(getattr(metric, "sum", 0.0))
            acc_count += float(getattr(metric, "count", 0.0))
            continue
        # Vector[Histogram]: sum over all labelled children.
        if _looks_like(metric, Vector, "Vector"):
            for child in _iter_children_of_vector(metric):
                if _looks_like(child, Histogram, "Histogram"):
                    acc_sum += float(getattr(child, "sum", 0.0))
                    acc_count += float(getattr(child, "count", 0.0))
    return acc_sum, acc_count
126
+
127
def _metrics_snapshot(llm) -> Dict[str, float]:
    """Read cumulative TTFT/TPOT histogram totals from the engine's V1 metrics.

    Returns an all-zero snapshot when metrics are unavailable (older vLLM,
    stats disabled, or any reader error), so downstream deltas degrade to
    NaN instead of crashing.
    """
    try:
        metric_list = llm.get_metrics()  # V1: list of Metric objects (Histogram/Vector/...)
    except Exception:
        return {"ttft_sum": 0.0, "ttft_cnt": 0.0, "tpot_sum": 0.0, "tpot_cnt": 0.0}
    ttft_sum, ttft_cnt = _collect_hist_sum_count(metric_list, TTFT_METRIC_NAME)
    tpot_sum, tpot_cnt = _collect_hist_sum_count(metric_list, TPOT_METRIC_NAME)
    return {
        "ttft_sum": ttft_sum,
        "ttft_cnt": ttft_cnt,
        "tpot_sum": tpot_sum,
        "tpot_cnt": tpot_cnt,
    }
135
+
136
+ def _metrics_delta(before: dict, after: dict):
137
+ return {
138
+ "ttft_sum": after["ttft_sum"] - before["ttft_sum"],
139
+ "ttft_cnt": after["ttft_cnt"] - before["ttft_cnt"],
140
+ "tpot_sum": after["tpot_sum"] - before["tpot_sum"],
141
+ "tpot_cnt": after["tpot_cnt"] - before["tpot_cnt"],
142
+ }
143
+
144
# ========= generate wrapper (named call site for NVTX) =========
def decorated_generate(llm: LLM, prompts: List[str], params: SamplingParams):
    """Thin pass-through to llm.generate, kept as a distinct frame so the
    call is easy to locate in Nsight Systems timelines."""
    outputs = llm.generate(prompts, params)
    return outputs
147
+
148
# ========= Statistics formatting =========
def fmt_stats(x: List[float]) -> Tuple[float, float, float]:
    """Return (mean, median, p90) of *x*, ignoring NaN entries.

    Returns (nan, nan, nan) for an empty or all-NaN input. A single valid
    sample is returned as its own mean/median/p90 — statistics.quantiles
    raises StatisticsError for fewer than two data points, which previously
    crashed this helper on one-sample inputs.
    """
    xs = [v for v in x if v == v]  # NaN != NaN, so this drops NaNs
    if not xs:
        return (float("nan"), float("nan"), float("nan"))
    if len(xs) == 1:
        # quantiles() needs >= 2 points; a lone sample is its own p90.
        return (xs[0], xs[0], xs[0])
    p90 = statistics.quantiles(xs, n=10)[-1]
    return (statistics.mean(xs), statistics.median(xs), p90)
154
+
155
def main():
    """Sweep batch sizes for each scenario and report latency/throughput.

    For every (scenario, batch size) pair this: builds an exact-length
    prompt, runs one vLLM generate call wrapped in an NVTX range, times it
    with perf_counter, and derives TTFT / decode throughput from the delta
    of the engine's V1 metric histograms taken before and after the run.
    Results are printed to stdout only.
    """
    print("--- vLLM V1 基准测试(含 NVTX 标记)---")
    print(f"模型: {MODEL_NAME}")
    print(f"批量大小: {BATCH_SIZES}")
    print(f"场景: {[s['name'] for s in SCENARIOS]}")
    print("-" * 60)

    if not torch.cuda.is_available():
        print("错误:需要 CUDA GPU。")
        return

    print("加载分词器/模型中...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True, trust_remote_code=TRUST_REMOTE_CODE)

    # Mark the model-loading phase with an NVTX range.
    nvtx.range_push("LLM_init")
    llm = LLM(
        model=MODEL_NAME,
        tensor_parallel_size=TP,
        dtype=DTYPE,
        trust_remote_code=TRUST_REMOTE_CODE,
        gpu_memory_utilization=GPU_MEM_UTIL,
        max_num_seqs=1024,  # large enough to cover every batch size in this sweep
        max_model_len=4096,
        disable_log_stats=False,  # keep V1 metrics collection enabled
    )
    nvtx.range_pop()
    print("模型加载完成。")

    for sc in SCENARIOS:
        name = sc["name"]
        prompt_tokens = sc["prompt_tokens"]
        max_new_tokens = sc["max_new_tokens"]

        print(f"\n===== 场景:{name} | prefill={prompt_tokens}, decode={max_new_tokens} =====")

        # Build a prompt with the exact target token length.
        prompt_text = build_exact_token_prompt(tokenizer, prompt_tokens)

        # Sampling parameters (greedy: temperature 0).
        sampling_params = SamplingParams(
            max_tokens=max_new_tokens,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            seed=SEED,
            n=1,
        )

        # Per-batch-size measurement loop (results printed for external parsing).
        for bs in BATCH_SIZES:
            print(f"\n--- 批量大小 bs={bs} ---")

            prompts = [prompt_text] * bs

            # Warmup (currently disabled; see WARMUP_PER_BS).
            # print("预热中...")
            # nvtx.range_push(f"WARMUP [{name}] bs={bs}")
            # _ = decorated_generate(llm, [prompts[0]], sampling_params)
            # torch.cuda.synchronize()
            # nvtx.range_pop()

            # Timed run with V1 metrics snapshots taken around it.
            # nvtx.range_push(f"RUN [{name}] bs={bs}")
            torch.cuda.synchronize()
            snap_before = _metrics_snapshot(llm)
            t0 = time.perf_counter()

            nvtx.range_push(f"generate [{name}] bs={bs}")
            outputs = decorated_generate(llm, prompts, sampling_params)
            nvtx.range_pop()  # generate

            torch.cuda.synchronize()
            t1 = time.perf_counter()
            snap_after = _metrics_snapshot(llm)
            # nvtx.range_pop()  # RUN

            duration = t1 - t0

            # Token accounting and wall-clock throughput.
            total_output_tokens = sum(len(o.outputs[0].token_ids) for o in outputs)
            avg_prompt_tokens = sum(len(o.prompt_token_ids) for o in outputs) / bs
            throughput = total_output_tokens / duration if duration > 0 else float("inf")

            # Derive TTFT / decode throughput from the V1 metrics delta;
            # NaN when the engine reported no samples for this run.
            delta = _metrics_delta(snap_before, snap_after)
            if delta["ttft_cnt"] > 0:
                ttft = delta["ttft_sum"] / delta["ttft_cnt"]
            else:
                ttft = float("nan")

            if delta["tpot_cnt"] > 0:
                avg_tpot = delta["tpot_sum"] / delta["tpot_cnt"]  # seconds/token
                decode_tps = 1.0 / avg_tpot
            else:
                decode_tps = float("nan")

            print(f"执行时间: {duration:.4f} s")
            print(f"实际平均输入 tokens: {avg_prompt_tokens:.2f}(目标 {prompt_tokens})")
            print(f"生成总 tokens: {total_output_tokens}")
            print(f"吞吐(生成tokens/秒): {throughput:.2f}")
            print(f"TTFT (V1 metrics): {ttft:.4f} s")
            print(f"解码吞吐 (V1 metrics): {decode_tps:.2f} tok/s")

    print("\n完成。提示:在 Nsight Systems 中可通过 NVTX 区间快速定位各场景/批量的调用。")
259
+
260
if __name__ == "__main__":
    # Echo GPU visibility so profiling logs record which device(s) were used.
    print(f"CUDA_VISIBLE_DEVICES = {os.getenv('CUDA_VISIBLE_DEVICES')}")
    main()