add llama merge benchmark
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
- eval/0623_32k/logs/I-Phi4.log +6 -5
- eval/0623_32k/logs/Llama3-8B.log +35 -0
- eval/0623_32k/logs/R-Phi4.log +4 -4
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|aime24|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_challenge|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_easy|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|commonsenseqa|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gpqa_diamond|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gsm8k|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|math_500|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|mmlu_pro|0_2025-06-26T21-33-22.888531.parquet +3 -0
- eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|truthfulqa|0_2025-06-26T21-33-22.888531.parquet +2 -2
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|aime24_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|arc_challenge_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|arc_easy_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|commonsenseqa_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|gpqa_diamond_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|gsm8k_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|math_500_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|truthfulqa_c|0_2025-07-08T16-02-52.425181.parquet +3 -0
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|aime24|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gpqa_diamond|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|mmlu_pro|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet +2 -2
- eval/0623_32k/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json +11 -6
- eval/0623_32k/results/._models_Llama3-8B/results_2025-07-08T16-02-52.425181.json +89 -0
- eval/0623_32k/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json +3 -3
- merge_bench2/logs/phi_darelinear_1.log +101 -0
- merge_bench2/logs/phi_darelinear_3.log +101 -0
- merge_bench2/logs/phi_darelinear_5.log +101 -0
- merge_bench2/logs/phi_darelinear_7.log +101 -0
- merge_bench2/logs/phi_darelinear_9.log +101 -0
- merge_bench2/logs/phi_linear_1.log +101 -0
- merge_bench2/logs/phi_linear_3.log +101 -0
- merge_bench2/logs/phi_linear_5.log +101 -0
- merge_bench2/logs/phi_linear_7.log +101 -0
- merge_bench2/logs/phi_linear_9.log +101 -0
- merge_bench2/logs/phi_ties_1.log +101 -0
- merge_bench2/logs/phi_ties_3.log +101 -0
- merge_bench2/logs/phi_ties_5.log +101 -0
- merge_bench2/logs/phi_ties_7.log +101 -0
- merge_bench2/logs/phi_ties_9.log +101 -0
- merge_bench2/outputs/._merged1_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet +3 -0
- merge_bench2/outputs/._merged1_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet +3 -0
- merge_bench2/outputs/._merged1_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet +3 -0
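The merge_bench2 logs above cover three merge methods (linear, ties, and DARE-linear), each evaluated at five settings of the method's weight parameter (1, 3, 5, 7, 9), one merged Phi checkpoint per combination. As a rough orientation, the Python sketch below enumerates that grid and the log/model paths implied by the file names; the helper is hypothetical, and the actual merge and eval commands are not part of this diff.

    from itertools import product

    # Hypothetical reconstruction of the merge_bench2 sweep implied by the log
    # file names above: phi_{method}_{setting}.log for each method/setting pair.
    METHODS = ["linear", "ties", "darelinear"]   # merge methods seen in the logs
    SETTINGS = [1, 3, 5, 7, 9]                   # weight settings seen in the logs

    def run_grid():
        for method, setting in product(METHODS, SETTINGS):
            model_dir = f"./merged1/phi_{method}_{setting}"         # matches the vLLM logs below
            log_path = f"merge_bench2/logs/phi_{method}_{setting}.log"
            print(f"evaluate {model_dir} -> {log_path}")            # eval command itself not shown in this diff

    if __name__ == "__main__":
        run_grid()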
eval/0623_32k/logs/I-Phi4.log
CHANGED
@@ -1,11 +1,11 @@
-INFO
-INFO
-INFO
-INFO
+INFO 07-07 15:13:29 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-07 15:13:29 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-07 16:00:44 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-07 16:00:44 [__init__.py:239] Automatically detected platform cuda.
 |       Task       |Version|        Metric       |Value |   |Stderr|
 |------------------|------:|---------------------|-----:|---|-----:|
 |all               |       |math_pass@1:1_samples|0.5459|±  |0.0253|
-|                  |       |sem                  |0.
+|                  |       |sem                  |0.6514|±  |0.0141|
 |mm|aime24|0       |      3|math_pass@1:1_samples|0.0667|±  |0.0463|
 |mm|arc_challenge|0|      0|sem                  |0.8959|±  |0.0089|
 |mm|arc_easy|0     |      0|sem                  |0.9529|±  |0.0043|
@@ -13,5 +13,6 @@ INFO 06-26 22:19:24 [__init__.py:239] Automatically detected platform cuda.
 |mm|gpqa_diamond|0 |      2|sem                  |0.3737|±  |0.0345|
 |mm|gsm8k|0        |      0|math_pass@1:1_samples|0.8772|±  |0.0090|
 |mm|math_500|0     |      3|math_pass@1:1_samples|0.6940|±  |0.0206|
+|mm|mmlu_pro|0     |      0|sem                  |0.3901|±  |0.0044|
 |mm|truthfulqa|0   |      0|sem                  |0.5703|±  |0.0194|
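The tail of each eval log is a fixed-width pipe table (Task / Version / Metric / Value / ± / Stderr). To pull those numbers out programmatically, a parser along the lines of this sketch works on the format shown above; it assumes the raw log keeps literal '|' characters inside task names such as mm|gsm8k|0, so columns are read from the right-hand side of each row.

    def parse_log_table(path):
        """Parse the trailing results table of an eval log (format shown above)."""
        rows = []
        for line in open(path, encoding="utf-8"):
            line = line.strip()
            if not line.startswith("|") or set(line) <= {"|", "-", ":", " "}:
                continue                            # skip log lines and the separator row
            cells = [c.strip() for c in line.strip("|").split("|")]
            if len(cells) < 6:
                continue
            task = "|".join(cells[:-5]) or None     # empty task = continuation of the previous row
            version, metric, value, _, stderr = cells[-5:]
            try:
                rows.append({"task": task, "version": version, "metric": metric,
                             "value": float(value), "stderr": float(stderr)})
            except ValueError:
                continue                            # header row and similar
        return rows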
eval/0623_32k/logs/Llama3-8B.log
ADDED
@@ -0,0 +1,35 @@
+INFO 07-08 16:02:50 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-08 16:02:50 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-08 16:43:27 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-08 16:43:27 [__init__.py:239] Automatically detected platform cuda.
+|        Task         |Version|        Metric       |Value |   |Stderr|
+|---------------------|------:|---------------------|-----:|---|-----:|
+|all                  |       |math_pass@1:1_samples|0.1892|±  |0.0097|
+|                     |       |em                   |0.6406|±  |0.0167|
+|                     |       |qem                  |0.6427|±  |0.0167|
+|                     |       |pem                  |0.6412|±  |0.0167|
+|                     |       |pqem                 |0.7188|±  |0.0165|
+|mm|aime24_c|0        |      3|math_pass@1:1_samples|0.0000|±  |0.0000|
+|mm|arc_challenge_c|0 |      0|em                   |0.7978|±  |0.0117|
+|                     |       |qem                  |0.7978|±  |0.0117|
+|                     |       |pem                  |0.7978|±  |0.0117|
+|                     |       |pqem                 |0.8439|±  |0.0106|
+|mm|arc_easy_c|0      |      0|em                   |0.9154|±  |0.0057|
+|                     |       |qem                  |0.9154|±  |0.0057|
+|                     |       |pem                  |0.9154|±  |0.0057|
+|                     |       |pqem                 |0.9432|±  |0.0048|
+|mm|commonsenseqa_c|0 |      0|em                   |0.6994|±  |0.0131|
+|                     |       |qem                  |0.7084|±  |0.0130|
+|                     |       |pem                  |0.6994|±  |0.0131|
+|                     |       |pqem                 |0.7592|±  |0.0122|
+|mm|gpqa_diamond_c|0  |      1|em                   |0.3333|±  |0.0336|
+|                     |       |qem                  |0.3333|±  |0.0336|
+|                     |       |pem                  |0.3333|±  |0.0336|
+|                     |       |pqem                 |0.4697|±  |0.0356|
+|mm|gsm8k_c|0         |      0|math_pass@1:1_samples|0.4276|±  |0.0136|
+|mm|math_500_c|0      |      3|math_pass@1:1_samples|0.1400|±  |0.0155|
+|mm|truthfulqa_c|0    |      0|em                   |0.4572|±  |0.0195|
+|                     |       |qem                  |0.4587|±  |0.0195|
+|                     |       |pem                  |0.4602|±  |0.0195|
+|                     |       |pqem                 |0.5780|±  |0.0193|
+
eval/0623_32k/logs/R-Phi4.log
CHANGED
@@ -1,7 +1,7 @@
-INFO
-INFO
-INFO
-INFO
+INFO 07-07 13:51:35 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-07 13:51:35 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-07 15:03:11 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-07 15:03:11 [__init__.py:239] Automatically detected platform cuda.
 |       Task       |Version|        Metric       |Value |   |Stderr|
 |------------------|------:|---------------------|-----:|---|-----:|
 |all               |       |sem                  |0.6602|±  |0.0139|
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|aime24|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d7f5edb3a86f6fc1a2eb34b2bdfe8ee87a2358547156fe1fcfba8b985fd222cd
+size 2842358
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_challenge|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5517116817ead9d74d9e6fc1160dc2f25f10cbd49de1243dc71be2ed49219178
+size 4431084
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_easy|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9fc26f5b451d785f3f99b685bdb93754b558e41552b616615c79bf27532c4ae3
+size 7341712
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|commonsenseqa|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c769545da075b660ed6840ff97d72636548b9f28ef002361a70d179bd7749289
+size 3498936
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gpqa_diamond|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9af166331fd59d5bb2e2b52dbbffedb420778a408b12c6cb2f08f0e68df59c82
+size 4715442
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gsm8k|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6cef7032c07b67d8449674fe00abea2f6db1516512e7e8191315b3a2a0722628
+size 5384687
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|math_500|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:30570ca816d7cdc0f83ada08040678828f994ab8e2fce0fae009f9f7632424eb
+size 9730533
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|mmlu_pro|0_2025-06-26T21-33-22.888531.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f8b95610b422d1210f57bbf4725e71c40a1c9e2592de18beee14e25645a450e
+size 114560377
eval/0623_32k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|truthfulqa|0_2025-06-26T21-33-22.888531.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2d8e1d687558d95703c83d1abfb1f18e0a63ffd484255c269efa26de8797906c
+size 2091069
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|aime24_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:456ae9f422f1c49ce369bbba363c185880f09ea7f7c0c15f0bce35841a603362
+size 2112450
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|arc_challenge_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53ec8c3f09a797e7d5cafff90ee4a230afc0afe211c362170df14b3ea11e6b0f
+size 424815
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|arc_easy_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a37e845d2714c18d9223485e07c4c9090ab4d3708eef55f5704b77150f78e56
+size 739322
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|commonsenseqa_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077a2274df994f03d696a207a6111899655a06052f9c9fff43939a048fc91d44
+size 286355
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|gpqa_diamond_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b3067ca6946ed6d4d5f498632e9976795136f22a04984cf625ce496fa2af6e8
+size 165999
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|gsm8k_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0eed089809b13b22a43cb0b297f7138d2645131a337ca17594b9fbfebb669bba
+size 2776633
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|math_500_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac37951ae72b24b9601e0d0011e909010fdb6675e3bffea7a44764a607c7d6e1
+size 30347540
eval/0623_32k/outputs/._models_Llama3-8B/2025-07-08T16-02-52.425181/outputs_mm|truthfulqa_c|0_2025-07-08T16-02-52.425181.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bc35267ceadb0dbbf0dc3ab48bcb454ba15cbaa1fca73d545f687f658e92b81
+size 233773
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|aime24|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6f5506032a82b52eae2ade513c8e12caa1dc757c108532eedfeb788cce788961
+size 5657633
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:44a293f4002da928ee5deaa4140cbd8222404c2778e670d3f2146321117866da
+size 30285972
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:337222163b8b3702497e20e8e02469d78fa09e25ce1930c69494e30fbe76961e
+size 38522350
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:33652dec68cdfb1651ea0147556bfaadb2ad0fbf9fc855ce1fd9ee7068111505
+size 36217965
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gpqa_diamond|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d388fc601a5c2210df109574b769724c3eaa8a3053b42106d84b0c47190e737b
+size 36517050
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b22e954185905b479ba58e33eb805844386cfbe5fc6af13a1cce51b3e707b67d
+size 20510656
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:88a806f4e91cd9e689d1f49dc5e92e56ce33f5c071a14d425174deaaceb4a0a8
+size 25460106
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|mmlu_pro|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fb940e06d7b7d02558169f1aac53ebc37461ec43228c28b883dfc805e69b44f8
+size 583083934
eval/0623_32k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b1153d49b33a6d6696e5a4a18ba6e3f5ff349881089235f3c25f6d7bc3dc062e
+size 32323750
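All of the .parquet output files above are committed as Git LFS pointers rather than raw data: a three-line stub giving the spec version, the SHA-256 of the actual file, and its size in bytes. A minimal reader for that pointer format (a sketch of the standard three key/value lines shown in the diffs above):

    def read_lfs_pointer(path):
        """Parse a Git LFS pointer file into {'version', 'oid', 'size'}."""
        info = {}
        with open(path, encoding="utf-8") as fh:
            for line in fh:
                if not line.strip():
                    continue
                key, _, value = line.strip().partition(" ")
                info[key] = value
        info["size"] = int(info["size"])                    # size is in bytes
        info["oid"] = info["oid"].removeprefix("sha256:")   # bare hex digest
        return info

    # e.g. read_lfs_pointer("outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet")
    # -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'b22e9541…', 'size': 20510656}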
eval/0623_32k/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json
CHANGED
@@ -6,7 +6,7 @@
         },
         "mm|commonsenseqa|0": {
             "sem": 0.7256347256347256,
-            "sem_stderr": 0.
+            "sem_stderr": 0.012774493368021566
         },
         "mm|arc_easy|0": {
             "sem": 0.9528619528619529,
@@ -14,7 +14,7 @@
         },
         "mm|truthfulqa|0": {
             "sem": 0.5703363914373089,
-            "sem_stderr": 0.
+            "sem_stderr": 0.019371945425947196
         },
         "mm|gpqa_diamond|0": {
             "sem": 0.37373737373737376,
@@ -24,9 +24,13 @@
             "math_pass@1:1_samples": 0.06666666666666667,
             "math_pass@1:1_samples_stderr": 0.046320555585310084
         },
+        "mm|mmlu_pro|0": {
+            "sem": 0.39012632978723405,
+            "sem_stderr": 0.0044470469001339505
+        },
         "mm|gsm8k|0": {
             "math_pass@1:1_samples": 0.8771796815769523,
-            "math_pass@1:1_samples_stderr": 0.
+            "math_pass@1:1_samples_stderr": 0.009041108602874685
         },
         "mm|arc_challenge|0": {
             "sem": 0.8959044368600683,
@@ -34,9 +38,9 @@
         },
         "all": {
             "math_pass@1:1_samples": 0.545948782747873,
-            "math_pass@1:1_samples_stderr": 0.
-            "sem": 0.
-            "sem_stderr": 0.
+            "math_pass@1:1_samples_stderr": 0.025330411395510052,
+            "sem": 0.6514335350531105,
+            "sem_stderr": 0.014055907050591466
         }
     },
     "versions": {
@@ -57,6 +61,7 @@
         "mm|truthfulqa|0": 654,
         "mm|gpqa_diamond|0": 198,
         "mm|aime24|0": 30,
+        "mm|mmlu_pro|0": 12032,
         "mm|gsm8k|0": 1319,
         "mm|arc_challenge|0": 1172
     }
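One reading aid for these results files: the "all" block matches an unweighted mean of the per-task values, taken separately for each metric. For I-Phi4, averaging the six sem scores (0.7256, 0.9529, 0.5703, 0.3737, 0.3901, 0.8959) gives 0.6514, and averaging the three math_pass@1:1_samples scores (0.0667, 0.8772, 0.6940) gives 0.5459, which are exactly the stored "all" values. A small sketch of that aggregation (how the aggregate stderr is formed is not re-derived here):

    from collections import defaultdict

    def aggregate_all(results):
        """Recompute an 'all' block as the unweighted mean of per-task metric values."""
        sums, counts = defaultdict(float), defaultdict(int)
        for task, metrics in results.items():
            if task == "all":
                continue
            for name, value in metrics.items():
                if name.endswith("_stderr"):
                    continue                  # only aggregate the point estimates here
                sums[name] += value
                counts[name] += 1
        return {name: sums[name] / counts[name] for name in sums}

    # With the I-Phi4 numbers above this returns sem ≈ 0.6514 and
    # math_pass@1:1_samples ≈ 0.5459, matching the stored "all" entry.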
eval/0623_32k/results/._models_Llama3-8B/results_2025-07-08T16-02-52.425181.json
ADDED
@@ -0,0 +1,89 @@
+{
+    "results": {
+        "mm|math_500_c|0": {
+            "math_pass@1:1_samples": 0.14,
+            "math_pass@1:1_samples_stderr": 0.015533272840269646
+        },
+        "mm|gpqa_diamond_c|0": {
+            "em": 0.3333333333333333,
+            "em_stderr": 0.03358618145732522,
+            "qem": 0.3333333333333333,
+            "qem_stderr": 0.03358618145732522,
+            "pem": 0.3333333333333333,
+            "pem_stderr": 0.03358618145732522,
+            "pqem": 0.4696969696969697,
+            "pqem_stderr": 0.0355580405176393
+        },
+        "mm|aime24_c|0": {
+            "math_pass@1:1_samples": 0.0,
+            "math_pass@1:1_samples_stderr": 0.0
+        },
+        "mm|gsm8k_c|0": {
+            "math_pass@1:1_samples": 0.4275966641394996,
+            "math_pass@1:1_samples_stderr": 0.013627322286986807
+        },
+        "mm|arc_challenge_c|0": {
+            "em": 0.7977815699658704,
+            "em_stderr": 0.011737454431872105,
+            "qem": 0.7977815699658704,
+            "qem_stderr": 0.011737454431872105,
+            "pem": 0.7977815699658704,
+            "pem_stderr": 0.011737454431872105,
+            "pqem": 0.8438566552901023,
+            "pqem_stderr": 0.010607612134427466
+        },
+        "mm|commonsenseqa_c|0": {
+            "em": 0.6994266994266994,
+            "em_stderr": 0.013127027292480048,
+            "qem": 0.7084357084357085,
+            "qem_stderr": 0.013011802821401589,
+            "pem": 0.6994266994266994,
+            "pem_stderr": 0.013127027292480048,
+            "pqem": 0.7592137592137592,
+            "pqem_stderr": 0.012241029737913621
+        },
+        "mm|truthfulqa_c|0": {
+            "em": 0.45718654434250766,
+            "em_stderr": 0.019494649519802777,
+            "qem": 0.45871559633027525,
+            "qem_stderr": 0.019499699530949086,
+            "pem": 0.4602446483180428,
+            "pem_stderr": 0.019504564668261167,
+            "pqem": 0.5779816513761468,
+            "pqem_stderr": 0.01932707366600138
+        },
+        "mm|arc_easy_c|0": {
+            "em": 0.9154040404040404,
+            "em_stderr": 0.0057101749707174545,
+            "qem": 0.9154040404040404,
+            "qem_stderr": 0.0057101749707174545,
+            "pem": 0.9154040404040404,
+            "pem_stderr": 0.0057101749707174545,
+            "pqem": 0.9431818181818182,
+            "pqem_stderr": 0.00475017455781462
+        },
+        "all": {
+            "math_pass@1:1_samples": 0.18919888804649987,
+            "math_pass@1:1_samples_stderr": 0.009720198375752151,
+            "em": 0.6406264374944902,
+            "em_stderr": 0.01673109753443952,
+            "qem": 0.6427340496938456,
+            "qem_stderr": 0.01670906264245309,
+            "pem": 0.6412380582895972,
+            "pem_stderr": 0.016733080564131197,
+            "pqem": 0.7187861707517593,
+            "pqem_stderr": 0.016496786122759278
+        }
+    },
+    "versions": {
+        "mm|aime24_c|0": 3,
+        "mm|arc_challenge_c|0": 0,
+        "mm|arc_easy_c|0": 0,
+        "mm|commonsenseqa_c|0": 0,
+        "mm|gpqa_diamond_c|0": 1,
+        "mm|gsm8k_c|0": 0,
+        "mm|math_500_c|0": 3,
+        "mm|mmlu_pro_c|0": 0,
+        "mm|truthfulqa_c|0": 0
+    }
+}
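With several results_*.json files of this shape now in the tree (I-Phi4, R-Phi4, Llama3-8B, plus the merge_bench2 runs), a small loader can line the models up task by task. The sketch below assumes pandas is available and keys each task on the first point-estimate metric it reports; the file paths in the usage comment are illustrative, not the exact directories in this commit.

    import json
    from pathlib import Path
    import pandas as pd

    def load_results(path):
        """Flatten one results_*.json into {task: headline_value}."""
        results = json.loads(Path(path).read_text())["results"]
        flat = {}
        for task, metrics in results.items():
            for name, value in metrics.items():
                if not name.endswith("_stderr"):
                    flat[task] = value        # first point estimate wins
                    break
        return flat

    def compare(paths_by_model):
        """Build a model x task table from several results files."""
        return pd.DataFrame({m: load_results(p) for m, p in paths_by_model.items()})

    # Illustrative usage (paths abbreviated):
    # compare({"I-Phi4": "results/I-Phi4.json", "Llama3-8B": "results/Llama3-8B.json"})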
eval/0623_32k/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json
CHANGED
@@ -14,7 +14,7 @@
         },
         "mm|math_500|0": {
             "math_pass@1:1_samples": 0.89,
-            "math_pass@1:1_samples_stderr": 0.
+            "math_pass@1:1_samples_stderr": 0.014006869199415621
         },
         "mm|arc_challenge|0": {
             "sem": 0.878839590443686,
@@ -30,7 +30,7 @@
         },
         "mm|arc_easy|0": {
             "sem": 0.9452861952861953,
-            "sem_stderr": 0.
+            "sem_stderr": 0.004666575631131668
         },
         "mm|mmlu_pro|0": {
             "sem": 0.546875,
@@ -40,7 +40,7 @@
             "sem": 0.6601657415196733,
             "sem_stderr": 0.013893408156259154,
             "math_pass@1:1_samples": 0.745887456827563,
-            "math_pass@1:1_samples_stderr": 0.
+            "math_pass@1:1_samples_stderr": 0.03791158116728402
         }
     },
     "versions": {
merge_bench2/logs/phi_darelinear_1.log
ADDED
@@ -0,0 +1,101 @@
+INFO 07-06 01:46:21 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-06 01:46:23 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 07-06 01:46:23 [config.py:2968] Downcasting torch.float32 to torch.float16.
+INFO 07-06 01:46:31 [config.py:717] This model supports multiple tasks: {'classify', 'embed', 'generate', 'score', 'reward'}. Defaulting to 'generate'.
+INFO 07-06 01:46:31 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-06 01:46:31 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-06 01:46:32 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_darelinear_1', speculative_config=None, tokenizer='./merged1/phi_darelinear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_darelinear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-06 01:46:32 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-06 01:46:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_320f0274'), local_subscribe_addr='ipc:///tmp/ef542976-275d-44d2-a206-ec4a43253c55', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-06 01:46:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1512d3fa3fd0>
+WARNING 07-06 01:46:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1512d3fa3d30>
+WARNING 07-06 01:46:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1512d3fa3310>
+WARNING 07-06 01:46:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1512d2578f10>
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c7e9f694'), local_subscribe_addr='ipc:///tmp/3c0ff71a-fb9e-4e5e-ad20-d3788fe1fb79', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_aa664e66'), local_subscribe_addr='ipc:///tmp/e6acd350-ebca-4ef8-9cea-88a00b133b7f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3159a9e0'), local_subscribe_addr='ipc:///tmp/a4ee572b-6d0f-43f0-ab8c-2eacdc42b618', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ed30c8fe'), local_subscribe_addr='ipc:///tmp/6b1abc0c-4920-4dda-9f5e-11f50e7f4ae6', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3913355) WARNING 07-06 01:46:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3913356) WARNING 07-06 01:46:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3913354) WARNING 07-06 01:46:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3913353) WARNING 07-06 01:46:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_d88251be'), local_subscribe_addr='ipc:///tmp/565c92f5-39f0-4734-bcde-ed8434943c8f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:36 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:36 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:36 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:36 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3913356) WARNING 07-06 01:46:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3913354) WARNING 07-06 01:46:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3913355) WARNING 07-06 01:46:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3913353) WARNING 07-06 01:46:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_1...
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_1...
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_1...
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_1...
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:41 [loader.py:458] Loading weights took 5.13 seconds
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:41 [loader.py:458] Loading weights took 5.31 seconds
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:41 [loader.py:458] Loading weights took 5.31 seconds
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:41 [loader.py:458] Loading weights took 5.30 seconds
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 5.431536 seconds
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 5.569417 seconds
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 5.569807 seconds
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 5.570462 seconds
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/93042111ed/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/93042111ed/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:49 [backends.py:430] Dynamo bytecode transform time: 7.58 s
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/93042111ed/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/93042111ed/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:49 [backends.py:430] Dynamo bytecode transform time: 7.58 s
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:49 [backends.py:430] Dynamo bytecode transform time: 7.58 s
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:49 [backends.py:430] Dynamo bytecode transform time: 7.58 s
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:46:54 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:46:54 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:46:54 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:46:54 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:47:15 [backends.py:148] Compiling a graph for general shape takes 25.02 s
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:47:15 [backends.py:148] Compiling a graph for general shape takes 25.28 s
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:47:15 [backends.py:148] Compiling a graph for general shape takes 25.32 s
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:47:15 [backends.py:148] Compiling a graph for general shape takes 25.40 s
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:47:37 [monitor.py:33] torch.compile takes 32.90 s in total
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:47:37 [monitor.py:33] torch.compile takes 32.60 s in total
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:47:37 [monitor.py:33] torch.compile takes 32.86 s in total
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:47:37 [monitor.py:33] torch.compile takes 32.98 s in total
+INFO 07-06 01:47:39 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
+INFO 07-06 01:47:39 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
+INFO 07-06 01:47:39 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
+INFO 07-06 01:47:39 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
+INFO 07-06 01:47:39 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
+INFO 07-06 01:47:39 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
+INFO 07-06 01:47:39 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
+INFO 07-06 01:47:39 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
+(VllmWorker rank=1 pid=3913354) INFO 07-06 01:48:08 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 3.00 GiB
+(VllmWorker rank=3 pid=3913356) INFO 07-06 01:48:08 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 3.00 GiB
+(VllmWorker rank=2 pid=3913355) INFO 07-06 01:48:08 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 3.00 GiB
+(VllmWorker rank=0 pid=3913353) INFO 07-06 01:48:08 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 3.00 GiB
+INFO 07-06 01:48:08 [core.py:159] init engine (profile, create kv cache, warmup model) took 86.39 seconds
+INFO 07-06 01:48:08 [core_client.py:439] Core engine process 0 ready.
+INFO 07-06 01:49:18 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-06 01:49:18 [__init__.py:239] Automatically detected platform cuda.
+|       Task       |Version|        Metric       |Value |   |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all               |       |sem                  |0.8862|±  |0.0187|
+|                  |       |math_pass@1:1_samples|0.9607|±  |0.0178|
+|mm|arc_challenge|0|      0|sem                  |0.9423|±  |0.0120|
+|mm|arc_easy|0     |      0|sem                  |0.9778|±  |0.0048|
+|mm|commonsenseqa|0|      0|sem                  |0.8313|±  |0.0210|
+|mm|gsm8k|0        |      0|math_pass@1:1_samples|0.9463|±  |0.0107|
+|mm|math_500|0     |      3|math_pass@1:1_samples|0.9750|±  |0.0250|
+|mm|truthfulqa|0   |      0|sem                  |0.7934|±  |0.0370|
+
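The engine line near the top of each merge_bench2 log pins down how every merged checkpoint was served: tensor_parallel_size=4, float32 weights downcast to float16, max_seq_len=2048, chunked prefill and prefix caching enabled. A minimal vLLM invocation with those settings might look like the sketch below; this is reconstructed from the log line as an assumption, not the actual benchmark launch command.

    from vllm import LLM, SamplingParams

    # Settings taken from the "Initializing a V1 LLM engine" line above; everything
    # else (the prompt, sampling parameters) is purely illustrative.
    llm = LLM(
        model="./merged1/phi_darelinear_1",
        dtype="float16",            # the log shows float32 weights downcast to float16
        tensor_parallel_size=4,
        max_model_len=2048,
        enable_prefix_caching=True,
    )

    outputs = llm.generate(
        ["Question: What is 12 * 7? Answer:"],
        SamplingParams(temperature=0.0, max_tokens=64),
    )
    print(outputs[0].outputs[0].text)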
merge_bench2/logs/phi_darelinear_3.log
ADDED
@@ -0,0 +1,101 @@
+INFO 07-06 01:49:17 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-06 01:49:19 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 07-06 01:49:19 [config.py:2968] Downcasting torch.float32 to torch.float16.
+INFO 07-06 01:49:25 [config.py:717] This model supports multiple tasks: {'generate', 'reward', 'classify', 'embed', 'score'}. Defaulting to 'generate'.
+INFO 07-06 01:49:26 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-06 01:49:26 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-06 01:49:27 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_darelinear_3', speculative_config=None, tokenizer='./merged1/phi_darelinear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_darelinear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-06 01:49:27 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-06 01:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_c00e4e92'), local_subscribe_addr='ipc:///tmp/9a2051fe-f334-40fd-a278-5287f6399f4e', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-06 01:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14aa6a160f70>
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a9a5cb11'), local_subscribe_addr='ipc:///tmp/16868dcf-a395-4e65-a290-fb125576f4d8', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-06 01:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14aa6bb8fe50>
+WARNING 07-06 01:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14aa6bb8ff40>
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_16b687e9'), local_subscribe_addr='ipc:///tmp/b6c103ea-b13e-4301-bc4d-2771b46e51c3', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-06 01:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14aa6bb8ecb0>
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bb638763'), local_subscribe_addr='ipc:///tmp/926c8722-75e3-405a-92bc-10b1d1fa7e66', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e008e93b'), local_subscribe_addr='ipc:///tmp/87c4a958-40c9-49ad-a75d-3b107a3eb6ff', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:29 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:29 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:29 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:29 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:29 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:29 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:29 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:29 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3915505) WARNING 07-06 01:49:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3915504) WARNING 07-06 01:49:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3915502) WARNING 07-06 01:49:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3915503) WARNING 07-06 01:49:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_06f3312e'), local_subscribe_addr='ipc:///tmp/39034025-40d4-4b18-92dd-81dba79948c4', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:30 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:30 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:30 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:30 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3915504) WARNING 07-06 01:49:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3915505) WARNING 07-06 01:49:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3915502) WARNING 07-06 01:49:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3915503) WARNING 07-06 01:49:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:30 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_3...
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:30 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_3...
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:30 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_3...
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:30 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_3...
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:34 [loader.py:458] Loading weights took 4.37 seconds
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:34 [loader.py:458] Loading weights took 4.43 seconds
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:34 [loader.py:458] Loading weights took 4.39 seconds
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:34 [loader.py:458] Loading weights took 4.40 seconds
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:35 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.555538 seconds
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:35 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.619168 seconds
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:35 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.618388 seconds
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:35 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.624664 seconds
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a9e72ffa9a/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:40 [backends.py:430] Dynamo bytecode transform time: 5.59 s
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a9e72ffa9a/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:40 [backends.py:430] Dynamo bytecode transform time: 5.60 s
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a9e72ffa9a/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:40 [backends.py:430] Dynamo bytecode transform time: 5.68 s
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a9e72ffa9a/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:40 [backends.py:430] Dynamo bytecode transform time: 5.70 s
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:49:45 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:49:45 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:49:45 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:49:45 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:50:06 [backends.py:148] Compiling a graph for general shape takes 25.40 s
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:50:07 [backends.py:148] Compiling a graph for general shape takes 25.42 s
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:50:07 [backends.py:148] Compiling a graph for general shape takes 25.83 s
+(VllmWorker rank=2 pid=3915504) INFO 07-06 01:50:07 [backends.py:148] Compiling a graph for general shape takes 26.05 s
+(VllmWorker rank=1 pid=3915503) INFO 07-06 01:50:29 [monitor.py:33] torch.compile takes 31.10 s in total
+(VllmWorker rank=0 pid=3915502) INFO 07-06 01:50:29 [monitor.py:33] torch.compile takes 31.53 s in total
+(VllmWorker rank=3 pid=3915505) INFO 07-06 01:50:29 [monitor.py:33] torch.compile takes 31.00 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=2 pid=3915504)[0;0m INFO 07-06 01:50:29 [monitor.py:33] torch.compile takes 31.63 s in total
|
| 75 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 01:50:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=2 pid=3915504)[0;0m INFO 07-06 01:50:58 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=0 pid=3915502)[0;0m INFO 07-06 01:50:58 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=3 pid=3915505)[0;0m INFO 07-06 01:50:58 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=1 pid=3915503)[0;0m INFO 07-06 01:50:58 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 01:50:58 [core.py:159] init engine (profile, create kv cache, warmup model) took 82.85 seconds
|
| 88 |
+
INFO 07-06 01:50:58 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 01:55:24 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 01:55:24 [__init__.py:239] Automatically detected platform cuda.
|
| 91 |
+
| Task |Version| Metric |Value | |Stderr|
|
| 92 |
+
|------------------|------:|---------------------|-----:|---|-----:|
|
| 93 |
+
|all | |sem |0.8894|± |0.0184|
|
| 94 |
+
| | |math_pass@1:1_samples|0.9334|± |0.0266|
|
| 95 |
+
|mm\|arc_challenge\|0| 0|sem |0.9501|± |0.0112|
|
| 96 |
+
|mm\|arc_easy\|0 | 0|sem |0.9778|± |0.0048|
|
| 97 |
+
|mm\|commonsenseqa\|0| 0|sem |0.8281|± |0.0211|
|
| 98 |
+
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9418|± |0.0111|
|
| 99 |
+
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9250|± |0.0422|
|
| 100 |
+
|mm\|truthfulqa\|0 | 0|sem |0.8017|± |0.0364|
|
| 101 |
+
|
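For reference, the engine settings recorded in these logs (tensor_parallel_size=4, torch.float16, max_seq_len=2048, prefix caching and chunked prefill enabled) can be reproduced offline with vLLM's Python API. The following is a minimal sketch of loading one of the merged checkpoints under those settings; it is not the exact harness invocation used for these runs, and the prompt is illustrative only.

```python
# Minimal sketch: load a merged checkpoint with the engine settings the logs
# report (TP=4, fp16, 2048-token context). Not the benchmark harness itself.
from vllm import LLM, SamplingParams

llm = LLM(
    model="./merged1/phi_darelinear_3",
    tensor_parallel_size=4,
    dtype="float16",
    max_model_len=2048,
    enable_prefix_caching=True,
)

params = SamplingParams(temperature=0.0, max_tokens=256)
# Illustrative prompt; the actual eval prompts come from the task suites above.
outputs = llm.generate(["Solve: 48 * 7 + 12 = ?"], params)
print(outputs[0].outputs[0].text)
```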
merge_bench2/logs/phi_darelinear_5.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 01:55:23 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 01:55:25 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 01:55:25 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 01:55:32 [config.py:717] This model supports multiple tasks: {'score', 'embed', 'generate', 'classify', 'reward'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 01:55:32 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 01:55:32 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 01:55:34 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_darelinear_5', speculative_config=None, tokenizer='./merged1/phi_darelinear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_darelinear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 01:55:34 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 01:55:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_2171845e'), local_subscribe_addr='ipc:///tmp/dba3e624-2ac5-4d20-8169-6a4f75252d0a', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 01:55:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14d2c3563f70>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1b40b36f'), local_subscribe_addr='ipc:///tmp/1cdf240a-73da-4faf-a18c-dcb8e6e14a6e', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 01:55:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14d2c1c18fa0>
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_58ea2b66'), local_subscribe_addr='ipc:///tmp/48ec31e2-cf6a-43da-8f28-41c977ee4357', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 01:55:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14d2c3563e80>
|
| 15 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1dfc0477'), local_subscribe_addr='ipc:///tmp/94447305-b98c-4932-8e59-e1605c4caf30', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 16 |
+
WARNING 07-06 01:55:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14d2c3562ce0>
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d9650a1e'), local_subscribe_addr='ipc:///tmp/705611bc-cd53-46b2-abb1-7aa321ee0a7b', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:36 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:36 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:36 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 21 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:36 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:36 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 23 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:36 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:36 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:36 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m WARNING 07-06 01:55:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m WARNING 07-06 01:55:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m WARNING 07-06 01:55:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m WARNING 07-06 01:55:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_24bcf7cc'), local_subscribe_addr='ipc:///tmp/c4c804a5-efd9-4de3-bf08-92b225ac40b7', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:36 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 32 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:36 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 33 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:36 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 34 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:36 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m WARNING 07-06 01:55:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m WARNING 07-06 01:55:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 40 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 41 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m WARNING 07-06 01:55:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m WARNING 07-06 01:55:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_5...
|
| 44 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_5...
|
| 45 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_5...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:36 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_5...
|
| 47 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:38 [loader.py:458] Loading weights took 1.56 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.751065 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:38 [loader.py:458] Loading weights took 1.88 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.069132 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:39 [loader.py:458] Loading weights took 2.19 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:39 [loader.py:458] Loading weights took 2.37 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.414548 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.606883 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a7e33e2aed/rank_2_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:45 [backends.py:430] Dynamo bytecode transform time: 5.63 s
|
| 57 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a7e33e2aed/rank_3_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:45 [backends.py:430] Dynamo bytecode transform time: 5.66 s
|
| 59 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a7e33e2aed/rank_1_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:45 [backends.py:430] Dynamo bytecode transform time: 5.73 s
|
| 61 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a7e33e2aed/rank_0_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:45 [backends.py:430] Dynamo bytecode transform time: 5.95 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:55:49 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:55:49 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:55:49 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:55:50 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:56:11 [backends.py:148] Compiling a graph for general shape takes 25.08 s
|
| 68 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:56:11 [backends.py:148] Compiling a graph for general shape takes 25.40 s
|
| 69 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:56:11 [backends.py:148] Compiling a graph for general shape takes 25.26 s
|
| 70 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:56:11 [backends.py:148] Compiling a graph for general shape takes 25.22 s
|
| 71 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:56:33 [monitor.py:33] torch.compile takes 31.17 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:56:33 [monitor.py:33] torch.compile takes 31.03 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:56:33 [monitor.py:33] torch.compile takes 30.74 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:56:33 [monitor.py:33] torch.compile takes 30.99 s in total
|
| 75 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 01:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=2 pid=3919221)[0;0m INFO 07-06 01:57:05 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=3 pid=3919222)[0;0m INFO 07-06 01:57:05 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=0 pid=3919219)[0;0m INFO 07-06 01:57:05 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=1 pid=3919220)[0;0m INFO 07-06 01:57:05 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 01:57:05 [core.py:159] init engine (profile, create kv cache, warmup model) took 85.49 seconds
|
| 88 |
+
INFO 07-06 01:57:05 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:07:57 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:07:58 [__init__.py:239] Automatically detected platform cuda.
|
| 91 |
+
| Task |Version| Metric |Value | |Stderr|
|
| 92 |
+
|------------------|------:|---------------------|-----:|---|-----:|
|
| 93 |
+
|all | |sem |0.8642|± |0.0201|
|
| 94 |
+
| | |math_pass@1:1_samples|0.8954|± |0.0292|
|
| 95 |
+
|mm\|arc_challenge\|0| 0|sem |0.9265|± |0.0134|
|
| 96 |
+
|mm\|arc_easy\|0 | 0|sem |0.9578|± |0.0065|
|
| 97 |
+
|mm\|commonsenseqa\|0| 0|sem |0.7875|± |0.0229|
|
| 98 |
+
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8658|± |0.0161|
|
| 99 |
+
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9250|± |0.0422|
|
| 100 |
+
|mm\|truthfulqa\|0 | 0|sem |0.7851|± |0.0375|
|
| 101 |
+
|
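The aggregate rows in these summary tables appear to be unweighted means of the per-task scores. For the phi_darelinear_5 table above: (0.9265 + 0.9578 + 0.7875 + 0.7851) / 4 ≈ 0.8642 for the sem tasks and (0.8658 + 0.9250) / 2 = 0.8954 for the math tasks, matching the reported `all` rows. A quick check with values copied from the table:

```python
# Re-derive the "all" rows of the phi_darelinear_5 summary table as unweighted
# means of the per-task scores (values copied from the table above).
sem = {"arc_challenge": 0.9265, "arc_easy": 0.9578,
       "commonsenseqa": 0.7875, "truthfulqa": 0.7851}
math_pass = {"gsm8k": 0.8658, "math_500": 0.9250}

print(f"sem all         = {sum(sem.values()) / len(sem):.4f}")              # 0.8642
print(f"math_pass@1 all = {sum(math_pass.values()) / len(math_pass):.4f}")  # 0.8954
```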
merge_bench2/logs/phi_darelinear_7.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:07:56 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:07:58 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:07:58 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:08:05 [config.py:717] This model supports multiple tasks: {'classify', 'generate', 'embed', 'reward', 'score'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:08:05 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:08:05 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:08:07 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_darelinear_7', speculative_config=None, tokenizer='./merged1/phi_darelinear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_darelinear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:08:07 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:08:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_1bcb022b'), local_subscribe_addr='ipc:///tmp/ee948401-c9f8-41a2-a34e-44dd790f1865', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:08:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14903a1a7fd0>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bfa2d2a8'), local_subscribe_addr='ipc:///tmp/86d51acd-0efd-4dea-8ca3-c4f87555bace', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 02:08:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x149038780f10>
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_79dfef4a'), local_subscribe_addr='ipc:///tmp/1c167278-2df4-417a-b744-d664c84373d5', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 02:08:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14903a1a7d30>
|
| 15 |
+
WARNING 07-06 02:08:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14903a1a7310>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_79b3846e'), local_subscribe_addr='ipc:///tmp/ae256d6f-7965-455a-8523-b801fb96df48', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2861b1af'), local_subscribe_addr='ipc:///tmp/8e727f98-dd48-43de-8f1d-86cbb27336b8', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:18 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:18 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:18 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 21 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:18 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:19 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 23 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:19 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:19 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:19 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m WARNING 07-06 02:08:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m WARNING 07-06 02:08:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m WARNING 07-06 02:08:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m WARNING 07-06 02:08:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_f31c9e4e'), local_subscribe_addr='ipc:///tmp/051d2fb3-e6ad-4c1d-b095-62c5dea7f720', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:19 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 32 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:19 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 33 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:19 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 34 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:19 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m WARNING 07-06 02:08:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m WARNING 07-06 02:08:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 40 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 41 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m WARNING 07-06 02:08:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m WARNING 07-06 02:08:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:19 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_7...
|
| 44 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:19 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_7...
|
| 45 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:19 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_7...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:19 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_7...
|
| 47 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:21 [loader.py:458] Loading weights took 1.67 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:21 [loader.py:458] Loading weights took 2.03 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:21 [loader.py:458] Loading weights took 1.98 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:21 [loader.py:458] Loading weights took 2.04 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:21 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.882851 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:22 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.218165 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:22 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.216825 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:22 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.216819 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1a71ee31dc/rank_3_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:27 [backends.py:430] Dynamo bytecode transform time: 5.57 s
|
| 57 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1a71ee31dc/rank_2_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:27 [backends.py:430] Dynamo bytecode transform time: 5.58 s
|
| 59 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1a71ee31dc/rank_1_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:27 [backends.py:430] Dynamo bytecode transform time: 5.63 s
|
| 61 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1a71ee31dc/rank_0_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:27 [backends.py:430] Dynamo bytecode transform time: 5.69 s
|
| 63 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:32 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:32 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:32 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:32 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:08:53 [backends.py:148] Compiling a graph for general shape takes 24.72 s
|
| 68 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:08:53 [backends.py:148] Compiling a graph for general shape takes 24.79 s
|
| 69 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:08:53 [backends.py:148] Compiling a graph for general shape takes 24.92 s
|
| 70 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:08:53 [backends.py:148] Compiling a graph for general shape takes 24.81 s
|
| 71 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:09:15 [monitor.py:33] torch.compile takes 30.34 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:09:15 [monitor.py:33] torch.compile takes 30.37 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:09:15 [monitor.py:33] torch.compile takes 30.49 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:09:15 [monitor.py:33] torch.compile takes 30.50 s in total
|
| 75 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 02:09:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=2 pid=3923543)[0;0m INFO 07-06 02:09:51 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=3 pid=3923544)[0;0m INFO 07-06 02:09:51 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=1 pid=3923542)[0;0m INFO 07-06 02:09:51 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=0 pid=3923541)[0;0m INFO 07-06 02:09:51 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 02:09:51 [core.py:159] init engine (profile, create kv cache, warmup model) took 89.50 seconds
|
| 88 |
+
INFO 07-06 02:09:52 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:21:17 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:21:17 [__init__.py:239] Automatically detected platform cuda.
|
| 91 |
+
| Task |Version| Metric |Value | |Stderr|
|
| 92 |
+
|------------------|------:|---------------------|-----:|---|-----:|
|
| 93 |
+
|all | |sem |0.2600|± |0.0241|
|
| 94 |
+
| | |math_pass@1:1_samples|0.7685|± |0.0450|
|
| 95 |
+
|mm\|arc_challenge\|0| 0|sem |0.3465|± |0.0244|
|
| 96 |
+
|mm\|arc_easy\|0 | 0|sem |0.3411|± |0.0154|
|
| 97 |
+
|mm\|commonsenseqa\|0| 0|sem |0.1375|± |0.0193|
|
| 98 |
+
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8121|± |0.0185|
|
| 99 |
+
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7250|± |0.0715|
|
| 100 |
+
|mm\|truthfulqa\|0 | 0|sem |0.2149|± |0.0375|
|
| 101 |
+
|
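Relative to phi_darelinear_3 and phi_darelinear_5, the multiple-choice (sem) tasks collapse in this run (all = 0.2600) while the math aggregate degrades more gently (0.7685). The per-task deltas against phi_darelinear_3, computed from values copied out of the two summary tables, make this explicit:

```python
# Per-task deltas: phi_darelinear_7 minus phi_darelinear_3
# (values copied from the respective summary tables in this diff).
darelinear_3 = {"arc_challenge": 0.9501, "arc_easy": 0.9778, "commonsenseqa": 0.8281,
                "truthfulqa": 0.8017, "gsm8k": 0.9418, "math_500": 0.9250}
darelinear_7 = {"arc_challenge": 0.3465, "arc_easy": 0.3411, "commonsenseqa": 0.1375,
                "truthfulqa": 0.2149, "gsm8k": 0.8121, "math_500": 0.7250}

for task in darelinear_3:
    delta = darelinear_7[task] - darelinear_3[task]
    print(f"{task:<15} {delta:+.4f}")  # multiple-choice drops ~0.59-0.69, math ~0.13-0.20
```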
merge_bench2/logs/phi_darelinear_9.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:21:16 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:21:17 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:21:17 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:21:24 [config.py:717] This model supports multiple tasks: {'score', 'classify', 'embed', 'generate', 'reward'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:21:24 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:21:24 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:21:26 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_darelinear_9', speculative_config=None, tokenizer='./merged1/phi_darelinear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_darelinear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:21:26 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:21:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_30f1a7b1'), local_subscribe_addr='ipc:///tmp/d1e803f4-0ebf-4e6c-bbfb-9ccad278781a', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:21:26 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1490caf70ee0>
|
| 11 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1476e0ab'), local_subscribe_addr='ipc:///tmp/53537ea2-5867-4e5e-9093-790836a402f1', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 02:21:26 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1490d8a33e80>
|
| 13 |
+
WARNING 07-06 02:21:26 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1490d8a33fa0>
|
| 14 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8e9eab81'), local_subscribe_addr='ipc:///tmp/8dde035c-f6ef-4844-ab48-311f0435aa49', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 15 |
+
WARNING 07-06 02:21:26 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x1490d8a33c70>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b13c31a5'), local_subscribe_addr='ipc:///tmp/2cde312f-120f-4e47-9fe3-61229001312c', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d1b62a6a'), local_subscribe_addr='ipc:///tmp/bcc5ee20-6ad0-4b8e-bfc1-22393c762b74', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:28 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:28 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:28 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 21 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:28 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:28 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 23 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:28 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:28 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:28 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m WARNING 07-06 02:21:29 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m WARNING 07-06 02:21:29 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m WARNING 07-06 02:21:29 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m WARNING 07-06 02:21:29 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_09c8b29c'), local_subscribe_addr='ipc:///tmp/ec6382d5-df44-4124-ab06-b0e8d98daf1d', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:29 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 32 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:29 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 33 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:29 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 34 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:29 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:29 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m WARNING 07-06 02:21:29 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 37 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m WARNING 07-06 02:21:29 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:29 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 39 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m WARNING 07-06 02:21:29 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 40 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:29 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 41 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:29 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 42 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m WARNING 07-06 02:21:29 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:29 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_9...
|
| 44 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:29 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_9...
|
| 45 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:29 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_9...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:29 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_darelinear_9...
|
| 47 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:31 [loader.py:458] Loading weights took 1.46 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.659963 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:31 [loader.py:458] Loading weights took 1.82 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:31 [loader.py:458] Loading weights took 1.81 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:31 [loader.py:458] Loading weights took 1.82 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.030502 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.037395 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 2.029022 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/966adedabd/rank_2_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:37 [backends.py:430] Dynamo bytecode transform time: 5.69 s
|
| 57 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/966adedabd/rank_1_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:37 [backends.py:430] Dynamo bytecode transform time: 5.71 s
|
| 59 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/966adedabd/rank_0_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:37 [backends.py:430] Dynamo bytecode transform time: 5.72 s
|
| 61 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/966adedabd/rank_3_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:37 [backends.py:430] Dynamo bytecode transform time: 5.80 s
|
| 63 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:21:41 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:21:41 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:21:41 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:21:42 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:22:02 [backends.py:148] Compiling a graph for general shape takes 24.80 s
|
| 68 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:22:03 [backends.py:148] Compiling a graph for general shape takes 24.91 s
|
| 69 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:22:03 [backends.py:148] Compiling a graph for general shape takes 24.97 s
|
| 70 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:22:03 [backends.py:148] Compiling a graph for general shape takes 25.22 s
|
| 71 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:22:25 [monitor.py:33] torch.compile takes 30.69 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:22:25 [monitor.py:33] torch.compile takes 31.02 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:22:25 [monitor.py:33] torch.compile takes 30.52 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:22:25 [monitor.py:33] torch.compile takes 30.61 s in total
|
| 75 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 02:22:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=0 pid=3927757)[0;0m INFO 07-06 02:22:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=1 pid=3927758)[0;0m INFO 07-06 02:22:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=3 pid=3927761)[0;0m INFO 07-06 02:22:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=2 pid=3927760)[0;0m INFO 07-06 02:22:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 02:22:56 [core.py:159] init engine (profile, create kv cache, warmup model) took 85.16 seconds
|
| 88 |
+
INFO 07-06 02:22:57 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:36:36 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:36:36 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.0911|± |0.0135|
| | |math_pass@1:1_samples|0.5582|± |0.0514|
|mm\|arc_challenge\|0| 0|sem |0.1470|± |0.0182|
|mm\|arc_easy\|0 | 0|sem |0.1616|± |0.0120|
|mm\|commonsenseqa\|0| 0|sem |0.0312|± |0.0097|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.5414|± |0.0236|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.5750|± |0.0792|
|mm\|truthfulqa\|0 | 0|sem |0.0248|± |0.0142|
merge_bench2/logs/phi_linear_1.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:36:35 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:36:37 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:36:37 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:36:44 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'embed', 'classify', 'reward'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:36:44 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:36:44 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:36:46 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_linear_1', speculative_config=None, tokenizer='./merged1/phi_linear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_linear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:36:46 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:36:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_bba99701'), local_subscribe_addr='ipc:///tmp/5b58d1e3-d352-4e22-97ce-b3481353fce1', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:36:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x151e0da2bf40>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:36:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_dd1b80b4'), local_subscribe_addr='ipc:///tmp/5a17525a-ddc0-417a-aefd-3c288b1c7d73', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 02:36:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x151dd3fbcf70>
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:36:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0a2f2346'), local_subscribe_addr='ipc:///tmp/9391ad30-1ca8-405f-93b7-49ff25e82cd7', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 02:36:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x151e0da2be50>
|
| 15 |
+
WARNING 07-06 02:36:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x151e0da2acb0>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:36:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_fa41f380'), local_subscribe_addr='ipc:///tmp/88807445-2f9b-4d8d-8364-43855f0773f7', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:36:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2cfdcb03'), local_subscribe_addr='ipc:///tmp/8c1be914-f5f9-4792-afa1-d8746e587cd6', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:36:48 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:36:48 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:36:48 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 21 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:36:48 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:36:48 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 23 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:36:48 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:36:48 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:36:48 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m WARNING 07-06 02:36:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m WARNING 07-06 02:36:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m WARNING 07-06 02:36:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m WARNING 07-06 02:36:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:36:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_3b88a983'), local_subscribe_addr='ipc:///tmp/ee30ae32-937c-4998-8ca9-cb4910068838', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:36:48 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 32 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:36:48 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 33 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:36:48 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 34 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:36:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 35 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m WARNING 07-06 02:36:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 36 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:36:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:36:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 38 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m WARNING 07-06 02:36:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m WARNING 07-06 02:36:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 40 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:36:48 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_1...
|
| 41 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:36:48 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_1...
|
| 42 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:36:48 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_1...
|
| 43 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:36:48 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 44 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:36:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 45 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m WARNING 07-06 02:36:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:36:48 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_1...
|
| 47 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:37:00 [loader.py:458] Loading weights took 11.14 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:37:00 [loader.py:458] Loading weights took 11.25 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:37:00 [loader.py:458] Loading weights took 11.26 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:37:00 [loader.py:458] Loading weights took 11.21 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:37:00 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.366286 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:37:00 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.453495 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:37:00 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.458498 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:37:00 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.460177 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:37:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/43ac6c7155/rank_1_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:37:06 [backends.py:430] Dynamo bytecode transform time: 5.54 s
|
| 57 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:37:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/43ac6c7155/rank_3_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:37:06 [backends.py:430] Dynamo bytecode transform time: 5.56 s
|
| 59 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:37:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/43ac6c7155/rank_2_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:37:06 [backends.py:430] Dynamo bytecode transform time: 5.58 s
|
| 61 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:37:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/43ac6c7155/rank_0_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:37:06 [backends.py:430] Dynamo bytecode transform time: 5.71 s
|
| 63 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:37:10 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:37:10 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:37:10 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:37:10 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:37:31 [backends.py:148] Compiling a graph for general shape takes 24.49 s
|
| 68 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:37:31 [backends.py:148] Compiling a graph for general shape takes 24.58 s
|
| 69 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:37:31 [backends.py:148] Compiling a graph for general shape takes 24.53 s
|
| 70 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:37:31 [backends.py:148] Compiling a graph for general shape takes 24.96 s
|
| 71 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:37:53 [monitor.py:33] torch.compile takes 30.66 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:37:53 [monitor.py:33] torch.compile takes 30.11 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:37:53 [monitor.py:33] torch.compile takes 30.02 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:37:53 [monitor.py:33] torch.compile takes 30.15 s in total
|
| 75 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 02:37:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=0 pid=3931833)[0;0m INFO 07-06 02:38:25 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=1 pid=3931834)[0;0m INFO 07-06 02:38:25 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=3 pid=3931836)[0;0m INFO 07-06 02:38:25 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=2 pid=3931835)[0;0m INFO 07-06 02:38:25 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 02:38:25 [core.py:159] init engine (profile, create kv cache, warmup model) took 85.17 seconds
|
| 88 |
+
INFO 07-06 02:38:26 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:39:29 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:39:29 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.8849|± |0.0188|
| | |math_pass@1:1_samples|0.9732|± |0.0053|
|mm\|arc_challenge\|0| 0|sem |0.9423|± |0.0120|
|mm\|arc_easy\|0 | 0|sem |0.9736|± |0.0052|
|mm\|commonsenseqa\|0| 0|sem |0.8469|± |0.0202|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9463|± |0.0107|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
|mm\|truthfulqa\|0 | 0|sem |0.7769|± |0.0380|
merge_bench2/logs/phi_linear_3.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:39:28 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:39:30 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:39:30 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:39:37 [config.py:717] This model supports multiple tasks: {'embed', 'reward', 'classify', 'generate', 'score'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:39:37 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:39:37 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:39:38 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_linear_3', speculative_config=None, tokenizer='./merged1/phi_linear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_linear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:39:38 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:39:38 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_a5c01040'), local_subscribe_addr='ipc:///tmp/e28dd0a8-d7d9-484b-a9df-fd784df532e5', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:39:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe0f043f70>
|
| 11 |
+
WARNING 07-06 02:39:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe0d628eb0>
|
| 12 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_44c0b1be'), local_subscribe_addr='ipc:///tmp/f726f257-7494-4ae1-9440-c7a0d887e826', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_efeec022'), local_subscribe_addr='ipc:///tmp/21d65996-02e4-4e78-b9f6-32ff5b943186', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 02:39:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe0f043e50>
|
| 15 |
+
WARNING 07-06 02:39:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe0f043c40>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bd8749ad'), local_subscribe_addr='ipc:///tmp/ff402d23-b98b-4c76-85b4-c4ac7cc322cd', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f2ba94d4'), local_subscribe_addr='ipc:///tmp/e3551197-456e-47e4-85ac-fb4f426b53ca', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:41 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:41 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 20 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:41 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 21 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:41 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 22 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:41 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 23 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:41 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:41 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:41 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m WARNING 07-06 02:39:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m WARNING 07-06 02:39:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m WARNING 07-06 02:39:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m WARNING 07-06 02:39:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_9980edb2'), local_subscribe_addr='ipc:///tmp/59fc7118-f292-43a0-b4a6-2fb643396424', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:41 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 32 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:41 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 33 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:41 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 34 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:41 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:41 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 36 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m WARNING 07-06 02:39:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 37 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:41 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 38 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:41 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 39 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m WARNING 07-06 02:39:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 40 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m WARNING 07-06 02:39:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 41 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:41 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 42 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m WARNING 07-06 02:39:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:41 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_3...
|
| 44 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:41 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_3...
|
| 45 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:41 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_3...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:41 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_3...
|
| 47 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:55 [loader.py:458] Loading weights took 13.34 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:55 [loader.py:458] Loading weights took 13.49 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:55 [loader.py:458] Loading weights took 13.46 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:55 [loader.py:458] Loading weights took 13.46 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:39:55 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.529307 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:39:55 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.679594 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:39:55 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.688507 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:39:55 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.676726 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:40:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/41d22a25e0/rank_3_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:40:01 [backends.py:430] Dynamo bytecode transform time: 5.58 s
|
| 57 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:40:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/41d22a25e0/rank_1_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:40:01 [backends.py:430] Dynamo bytecode transform time: 5.67 s
|
| 59 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:40:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/41d22a25e0/rank_2_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:40:01 [backends.py:430] Dynamo bytecode transform time: 5.72 s
|
| 61 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:40:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/41d22a25e0/rank_0_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:40:01 [backends.py:430] Dynamo bytecode transform time: 5.75 s
|
| 63 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:40:05 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:40:05 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:40:05 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:40:05 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:40:27 [backends.py:148] Compiling a graph for general shape takes 25.04 s
|
| 68 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:40:27 [backends.py:148] Compiling a graph for general shape takes 25.07 s
|
| 69 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:40:27 [backends.py:148] Compiling a graph for general shape takes 25.26 s
|
| 70 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:40:27 [backends.py:148] Compiling a graph for general shape takes 25.46 s
|
| 71 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:40:49 [monitor.py:33] torch.compile takes 30.74 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:40:49 [monitor.py:33] torch.compile takes 31.01 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:40:49 [monitor.py:33] torch.compile takes 31.18 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:40:49 [monitor.py:33] torch.compile takes 30.61 s in total
|
| 75 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 02:40:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=1 pid=3934889)[0;0m INFO 07-06 02:41:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=3 pid=3934891)[0;0m INFO 07-06 02:41:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=0 pid=3934888)[0;0m INFO 07-06 02:41:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=2 pid=3934890)[0;0m INFO 07-06 02:41:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 02:41:20 [core.py:159] init engine (profile, create kv cache, warmup model) took 85.05 seconds
|
| 88 |
+
INFO 07-06 02:41:21 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:42:33 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:42:33 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.8920|± |0.0185|
| | |math_pass@1:1_samples|0.9504|± |0.0226|
|mm\|arc_challenge\|0| 0|sem |0.9291|± |0.0132|
|mm\|arc_easy\|0 | 0|sem |0.9768|± |0.0049|
|mm\|commonsenseqa\|0| 0|sem |0.8688|± |0.0189|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9508|± |0.0102|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9500|± |0.0349|
|mm\|truthfulqa\|0 | 0|sem |0.7934|± |0.0370|
merge_bench2/logs/phi_linear_5.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:42:31 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:42:33 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:42:33 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:42:40 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'classify', 'embed', 'reward'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:42:40 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:42:40 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:42:42 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_linear_5', speculative_config=None, tokenizer='./merged1/phi_linear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_linear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:42:42 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:42:42 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_112ebc41'), local_subscribe_addr='ipc:///tmp/1882ecae-5424-4f82-b248-0f2d7e5fdc8f', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:42:42 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14db3e857fa0>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:42:42 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_370026e6'), local_subscribe_addr='ipc:///tmp/0571c96f-cafc-478b-a90d-095738c3a945', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 02:42:42 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14db3cf0d000>
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:42:42 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cee650a4'), local_subscribe_addr='ipc:///tmp/4e00ab79-208e-4b5e-8dde-dd04200008df', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 02:42:42 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14db3e856d40>
|
| 15 |
+
WARNING 07-06 02:42:42 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14db3e857ee0>
|
| 16 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:42:42 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b0d259b4'), local_subscribe_addr='ipc:///tmp/8b83fc84-07d7-4c46-a2bd-2b3f84ff642c', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:42:42 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1f29a7a3'), local_subscribe_addr='ipc:///tmp/ee9a8865-f497-4e80-87c1-e4529840ac14', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:42:53 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:42:53 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:42:53 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 21 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:42:53 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:42:53 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 23 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:42:53 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:42:53 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 25 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:42:53 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m WARNING 07-06 02:42:54 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m WARNING 07-06 02:42:54 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m WARNING 07-06 02:42:54 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m WARNING 07-06 02:42:54 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:42:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_7debc972'), local_subscribe_addr='ipc:///tmp/fd19e95c-cd17-47e2-8304-6822bab90a43', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:42:54 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 32 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:42:54 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 33 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:42:54 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 34 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:42:54 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:42:54 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:42:54 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m WARNING 07-06 02:42:54 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m WARNING 07-06 02:42:54 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m WARNING 07-06 02:42:54 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 40 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:42:54 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_5...
|
| 41 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:42:54 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_5...
|
| 42 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:42:54 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_5...
|
| 43 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:42:54 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 44 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:42:54 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 45 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m WARNING 07-06 02:42:54 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:42:54 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_5...
|
| 47 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:43:07 [loader.py:458] Loading weights took 13.22 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:43:08 [loader.py:458] Loading weights took 13.30 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:43:08 [loader.py:458] Loading weights took 13.24 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:43:08 [loader.py:458] Loading weights took 13.25 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:43:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.413943 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:43:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.493882 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:43:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.502372 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:43:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.478218 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:43:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5c84612a48/rank_1_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:43:13 [backends.py:430] Dynamo bytecode transform time: 5.60 s
|
| 57 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:43:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5c84612a48/rank_3_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:43:13 [backends.py:430] Dynamo bytecode transform time: 5.61 s
|
| 59 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:43:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5c84612a48/rank_2_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:43:13 [backends.py:430] Dynamo bytecode transform time: 5.63 s
|
| 61 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:43:14 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5c84612a48/rank_0_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:43:14 [backends.py:430] Dynamo bytecode transform time: 5.98 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:43:18 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:43:18 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:43:18 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:43:18 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:43:39 [backends.py:148] Compiling a graph for general shape takes 25.13 s
|
| 68 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:43:39 [backends.py:148] Compiling a graph for general shape takes 25.14 s
|
| 69 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:43:39 [backends.py:148] Compiling a graph for general shape takes 25.20 s
|
| 70 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:43:40 [backends.py:148] Compiling a graph for general shape takes 25.04 s
|
| 71 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:44:02 [monitor.py:33] torch.compile takes 30.74 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:44:02 [monitor.py:33] torch.compile takes 30.76 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:44:02 [monitor.py:33] torch.compile takes 30.82 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:44:02 [monitor.py:33] torch.compile takes 31.02 s in total
|
| 75 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 02:44:03 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=1 pid=3937823)[0;0m INFO 07-06 02:44:36 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=3 pid=3937825)[0;0m INFO 07-06 02:44:36 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=2 pid=3937824)[0;0m INFO 07-06 02:44:36 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=0 pid=3937822)[0;0m INFO 07-06 02:44:36 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 02:44:36 [core.py:159] init engine (profile, create kv cache, warmup model) took 88.26 seconds
|
| 88 |
+
INFO 07-06 02:44:37 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:46:13 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:46:13 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.8782|± |0.0191|
| | |math_pass@1:1_samples|0.9347|± |0.0239|
|mm\|arc_challenge\|0| 0|sem |0.9423|± |0.0120|
|mm\|arc_easy\|0 | 0|sem |0.9810|± |0.0044|
|mm\|commonsenseqa\|0| 0|sem |0.8375|± |0.0207|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9195|± |0.0129|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9500|± |0.0349|
|mm\|truthfulqa\|0 | 0|sem |0.7521|± |0.0394|
merge_bench2/logs/phi_linear_7.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:46:12 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:46:14 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:46:14 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:46:21 [config.py:717] This model supports multiple tasks: {'generate', 'embed', 'classify', 'score', 'reward'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:46:21 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:46:21 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:46:22 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_linear_7', speculative_config=None, tokenizer='./merged1/phi_linear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_linear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:46:22 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:46:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_ce4c166f'), local_subscribe_addr='ipc:///tmp/9c8d640e-8d6e-4ee8-b0ef-db2baef5e786', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:46:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x154313863fd0>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_16defaa0'), local_subscribe_addr='ipc:///tmp/935be012-6f66-4106-8bc9-e7e31a3aa51c', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 02:46:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x154311f14f10>
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ff4c5b18'), local_subscribe_addr='ipc:///tmp/31fd90f4-a7d7-4de2-ae6a-559c689da219', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 02:46:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x154313863d30>
|
| 15 |
+
WARNING 07-06 02:46:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x154313863310>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b27dd7df'), local_subscribe_addr='ipc:///tmp/9d20279d-b32d-4cb7-8a6a-b30af9f5c77e', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e1a03c9e'), local_subscribe_addr='ipc:///tmp/4b451eca-316d-4280-9087-2bf463e45cb4', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:24 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:24 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:24 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 21 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:24 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:24 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 23 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:24 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 24 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:24 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:24 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m WARNING 07-06 02:46:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m WARNING 07-06 02:46:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m WARNING 07-06 02:46:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m WARNING 07-06 02:46:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_6fbe7734'), local_subscribe_addr='ipc:///tmp/0c9267d0-be15-4f44-87c6-74ef0054aa0f', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:25 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 32 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:25 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 33 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:25 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 34 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:25 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 35 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:25 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:25 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m WARNING 07-06 02:46:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m WARNING 07-06 02:46:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:25 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 40 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:25 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 41 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m WARNING 07-06 02:46:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m WARNING 07-06 02:46:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:25 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_7...
|
| 44 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:25 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_7...
|
| 45 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:25 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_7...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:25 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_7...
|
| 47 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:39 [loader.py:458] Loading weights took 13.74 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:39 [loader.py:458] Loading weights took 13.87 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:39 [loader.py:458] Loading weights took 13.86 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:39 [loader.py:458] Loading weights took 13.83 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 14.052956 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 14.052767 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.948024 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 14.053611 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/39e4f38180/rank_2_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:45 [backends.py:430] Dynamo bytecode transform time: 5.58 s
|
| 57 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/39e4f38180/rank_3_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:45 [backends.py:430] Dynamo bytecode transform time: 5.62 s
|
| 59 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/39e4f38180/rank_1_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:45 [backends.py:430] Dynamo bytecode transform time: 5.71 s
|
| 61 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/39e4f38180/rank_0_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:45 [backends.py:430] Dynamo bytecode transform time: 5.79 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:46:49 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:46:49 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:46:50 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:46:50 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:47:11 [backends.py:148] Compiling a graph for general shape takes 25.07 s
|
| 68 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:47:11 [backends.py:148] Compiling a graph for general shape takes 25.20 s
|
| 69 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:47:11 [backends.py:148] Compiling a graph for general shape takes 25.04 s
|
| 70 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:47:11 [backends.py:148] Compiling a graph for general shape takes 25.13 s
|
| 71 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:47:33 [monitor.py:33] torch.compile takes 30.75 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:47:33 [monitor.py:33] torch.compile takes 30.81 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:47:33 [monitor.py:33] torch.compile takes 30.92 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:47:33 [monitor.py:33] torch.compile takes 30.65 s in total
|
| 75 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 02:47:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=2 pid=3940568)[0;0m INFO 07-06 02:48:05 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=3 pid=3940569)[0;0m INFO 07-06 02:48:05 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=1 pid=3940567)[0;0m INFO 07-06 02:48:05 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=0 pid=3940566)[0;0m INFO 07-06 02:48:05 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 02:48:05 [core.py:159] init engine (profile, create kv cache, warmup model) took 85.74 seconds
|
| 88 |
+
INFO 07-06 02:48:05 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:49:54 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:49:54 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.8754|± |0.0195|
| | |math_pass@1:1_samples|0.8782|± |0.0341|
|mm\|arc_challenge\|0| 0|sem |0.9396|± |0.0122|
|mm\|arc_easy\|0 | 0|sem |0.9662|± |0.0059|
|mm\|commonsenseqa\|0| 0|sem |0.8438|± |0.0203|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8814|± |0.0153|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8750|± |0.0530|
|mm\|truthfulqa\|0 | 0|sem |0.7521|± |0.0394|
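A note on the `kv_cache_utils` lines in the log above: the reported "maximum concurrency" appears to be the GPU KV cache size divided by the per-request token budget (`max_seq_len=2048` in the engine config). A quick check against the numbers in this log, assuming that relationship:

```python
# Reproduce the concurrency figure from the kv_cache_utils lines above,
# assuming concurrency = kv_cache_tokens / max_seq_len.
kv_cache_tokens = 1_999_536   # "GPU KV cache size" reported in the log
max_seq_len = 2_048           # tokens per request (max_seq_len in the engine config)

print(f"{kv_cache_tokens / max_seq_len:.2f}x")  # -> 976.34x, matching the logged value
```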
merge_bench2/logs/phi_linear_9.log
ADDED
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:49:53 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:49:55 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:49:55 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:50:02 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'generate', 'reward', 'classify'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:50:02 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:50:02 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:50:03 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_linear_9', speculative_config=None, tokenizer='./merged1/phi_linear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_linear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:50:03 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:50:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_b519e4fe'), local_subscribe_addr='ipc:///tmp/28925ff2-293f-492e-ad54-acab53b337fc', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:50:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x148c8580cfa0>
|
| 11 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c6ecda1f'), local_subscribe_addr='ipc:///tmp/c7dbebf6-19d4-49cd-8711-5d6dd4dae91c', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 02:50:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x148c87167f70>
|
| 13 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9464b9ea'), local_subscribe_addr='ipc:///tmp/40821cd8-5d98-4f24-9bf7-2b68bca621bf', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 02:50:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x148c87166ce0>
|
| 15 |
+
WARNING 07-06 02:50:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x148c87167e80>
|
| 16 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e28dc3de'), local_subscribe_addr='ipc:///tmp/1d96cd06-0467-4030-85e0-946e264263f4', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b327abc9'), local_subscribe_addr='ipc:///tmp/7275f8cb-e7de-4d42-aae8-a2da293e4203', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:05 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:05 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:05 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 21 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:05 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:05 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 23 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:05 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:05 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:05 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m WARNING 07-06 02:50:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m WARNING 07-06 02:50:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m WARNING 07-06 02:50:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m WARNING 07-06 02:50:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_c8444766'), local_subscribe_addr='ipc:///tmp/4b82c3c7-06a1-4ab0-b594-53220b4a55d5', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:06 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 32 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:06 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 33 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:06 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 34 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:06 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 35 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m WARNING 07-06 02:50:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m WARNING 07-06 02:50:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 40 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 41 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m WARNING 07-06 02:50:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m WARNING 07-06 02:50:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:06 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_9...
|
| 44 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:06 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_9...
|
| 45 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:06 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_9...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:06 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_linear_9...
|
| 47 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:20 [loader.py:458] Loading weights took 13.90 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:20 [loader.py:458] Loading weights took 14.02 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:20 [loader.py:458] Loading weights took 13.98 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:20 [loader.py:458] Loading weights took 14.02 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:21 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 14.130689 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:21 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 14.203956 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:21 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 14.201688 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:21 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 14.201672 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe3a6231bf/rank_2_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:26 [backends.py:430] Dynamo bytecode transform time: 5.56 s
|
| 57 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe3a6231bf/rank_0_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:26 [backends.py:430] Dynamo bytecode transform time: 5.62 s
|
| 59 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe3a6231bf/rank_3_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:26 [backends.py:430] Dynamo bytecode transform time: 5.65 s
|
| 61 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe3a6231bf/rank_1_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:26 [backends.py:430] Dynamo bytecode transform time: 5.68 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:31 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:31 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:31 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:31 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:50:52 [backends.py:148] Compiling a graph for general shape takes 24.87 s
|
| 68 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:50:52 [backends.py:148] Compiling a graph for general shape takes 24.91 s
|
| 69 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:50:52 [backends.py:148] Compiling a graph for general shape takes 25.00 s
|
| 70 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:50:52 [backends.py:148] Compiling a graph for general shape takes 25.13 s
|
| 71 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:51:14 [monitor.py:33] torch.compile takes 30.81 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:51:14 [monitor.py:33] torch.compile takes 30.43 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:51:14 [monitor.py:33] torch.compile takes 30.56 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:51:14 [monitor.py:33] torch.compile takes 30.61 s in total
|
| 75 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 02:51:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=3 pid=3943270)[0;0m INFO 07-06 02:51:53 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=2 pid=3943269)[0;0m INFO 07-06 02:51:53 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=0 pid=3943267)[0;0m INFO 07-06 02:51:53 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=1 pid=3943268)[0;0m INFO 07-06 02:51:53 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 02:51:53 [core.py:159] init engine (profile, create kv cache, warmup model) took 92.57 seconds
|
| 88 |
+
INFO 07-06 02:51:53 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 02:59:10 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 02:59:10 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.6572|± |0.0272|
| | |math_pass@1:1_samples|0.8719|± |0.0302|
|mm\|arc_challenge\|0| 0|sem |0.7507|± |0.0222|
|mm\|arc_easy\|0 | 0|sem |0.7772|± |0.0135|
|mm\|commonsenseqa\|0| 0|sem |0.5969|± |0.0275|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8188|± |0.0182|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9250|± |0.0422|
|mm\|truthfulqa\|0 | 0|sem |0.5041|± |0.0456|
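The engine configuration recorded in these logs (tensor_parallel_size=4, float16, max_seq_len=2048) can be reproduced offline with vLLM's Python API. A minimal sketch, using one of the merged checkpoints named in the log above; the prompt is a placeholder, and the actual benchmark numbers come from whichever harness produced the result tables:

```python
# Minimal sketch: load a merged checkpoint with settings matching the log above.
from vllm import LLM, SamplingParams

llm = LLM(
    model="./merged1/phi_linear_9",  # checkpoint path taken from the log's engine config
    tensor_parallel_size=4,
    dtype="float16",
    max_model_len=2048,              # corresponds to max_seq_len=2048 in the log
)

params = SamplingParams(temperature=0.0, max_tokens=256)
outputs = llm.generate(["Question: What is 17 * 23? Answer:"], params)
print(outputs[0].outputs[0].text)
```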
merge_bench2/logs/phi_ties_1.log
ADDED
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 02:59:09 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 02:59:10 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 02:59:10 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 02:59:17 [config.py:717] This model supports multiple tasks: {'classify', 'embed', 'score', 'generate', 'reward'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 02:59:17 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 02:59:17 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 02:59:19 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_ties_1', speculative_config=None, tokenizer='./merged1/phi_ties_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_ties_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 02:59:19 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 02:59:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_1c2919dd'), local_subscribe_addr='ipc:///tmp/7bf7a26b-deb0-4239-a469-ecff6222f327', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 02:59:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14f78d994e20>
|
| 11 |
+
WARNING 07-06 02:59:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14f78f5b3ee0>
|
| 12 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_525b7d48'), local_subscribe_addr='ipc:///tmp/9c0412be-a7a0-48c1-bea1-bd9804434b43', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 13 |
+
WARNING 07-06 02:59:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14f78f5b3e20>
|
| 14 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2e63f1fc'), local_subscribe_addr='ipc:///tmp/52ccbd2f-912f-4611-a518-624b4eb315fe', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 15 |
+
WARNING 07-06 02:59:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14f78f5b3be0>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bb7e1f2a'), local_subscribe_addr='ipc:///tmp/2d82e35a-7ad3-4f65-aa9b-3681d55e310c', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c133ff6d'), local_subscribe_addr='ipc:///tmp/32bc0b1f-e05e-4023-a81e-48f126850760', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:31 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:31 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:31 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 21 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:31 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:31 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 23 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:31 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:31 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:31 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m WARNING 07-06 02:59:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m WARNING 07-06 02:59:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m WARNING 07-06 02:59:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m WARNING 07-06 02:59:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:31 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_fb78652e'), local_subscribe_addr='ipc:///tmp/65c07cec-5c58-4524-afa2-da4fc7ac7056', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:31 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 32 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:31 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 33 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:31 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 34 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:31 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m WARNING 07-06 02:59:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 37 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 38 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m WARNING 07-06 02:59:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 40 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 41 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m WARNING 07-06 02:59:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m WARNING 07-06 02:59:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:31 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_1...
|
| 44 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:31 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_1...
|
| 45 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:31 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_1...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:31 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_1...
|
| 47 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:45 [loader.py:458] Loading weights took 13.18 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:45 [loader.py:458] Loading weights took 13.29 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:45 [loader.py:458] Loading weights took 13.33 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:45 [loader.py:458] Loading weights took 13.29 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:45 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.358423 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:45 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.510401 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:45 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.515920 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:45 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.504100 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:51 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/89b1d99067/rank_2_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:51 [backends.py:430] Dynamo bytecode transform time: 5.66 s
|
| 57 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:51 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/89b1d99067/rank_3_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:51 [backends.py:430] Dynamo bytecode transform time: 5.66 s
|
| 59 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:51 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/89b1d99067/rank_0_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:51 [backends.py:430] Dynamo bytecode transform time: 5.75 s
|
| 61 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:51 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/89b1d99067/rank_1_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:51 [backends.py:430] Dynamo bytecode transform time: 5.76 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 02:59:55 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 02:59:55 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 02:59:56 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 02:59:56 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 03:00:17 [backends.py:148] Compiling a graph for general shape takes 25.01 s
|
| 68 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 03:00:17 [backends.py:148] Compiling a graph for general shape takes 25.27 s
|
| 69 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 03:00:17 [backends.py:148] Compiling a graph for general shape takes 25.64 s
|
| 70 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 03:00:17 [backends.py:148] Compiling a graph for general shape takes 25.50 s
|
| 71 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 03:00:39 [monitor.py:33] torch.compile takes 31.38 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 03:00:39 [monitor.py:33] torch.compile takes 31.26 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 03:00:39 [monitor.py:33] torch.compile takes 30.68 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 03:00:39 [monitor.py:33] torch.compile takes 30.93 s in total
|
| 75 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 03:00:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=3 pid=3946093)[0;0m INFO 07-06 03:01:12 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=2 pid=3946092)[0;0m INFO 07-06 03:01:12 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=1 pid=3946089)[0;0m INFO 07-06 03:01:12 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=0 pid=3946088)[0;0m INFO 07-06 03:01:12 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 03:01:12 [core.py:159] init engine (profile, create kv cache, warmup model) took 87.18 seconds
|
| 88 |
+
INFO 07-06 03:01:13 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 03:04:35 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 03:04:35 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.8768|± |0.0193|
| | |math_pass@1:1_samples|0.9312|± |0.0268|
|mm\|arc_challenge\|0| 0|sem |0.9318|± |0.0129|
|mm\|arc_easy\|0 | 0|sem |0.9789|± |0.0047|
|mm\|commonsenseqa\|0| 0|sem |0.8281|± |0.0211|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9374|± |0.0115|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9250|± |0.0422|
|mm\|truthfulqa\|0 | 0|sem |0.7686|± |0.0385|
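The `phi_ties_*` checkpoints evaluated in these logs presumably come from a TIES-style merge: trim small task-vector entries, elect a sign per parameter, then average only the deltas that agree with the elected sign. A minimal sketch of that procedure over hypothetical tensors (not the exact recipe used for this repository):

```python
# Minimal TIES-style merge sketch (hypothetical inputs; illustration only).
import torch

def ties_merge(base: torch.Tensor, donors: list[torch.Tensor], density: float = 0.2) -> torch.Tensor:
    deltas = []
    for d in donors:
        tv = d - base                                     # task vector
        k = max(1, int(density * tv.numel()))
        thresh = tv.abs().flatten().kthvalue(tv.numel() - k + 1).values
        deltas.append(torch.where(tv.abs() >= thresh, tv, torch.zeros_like(tv)))  # trim
    stacked = torch.stack(deltas)
    sign = torch.sign(stacked.sum(dim=0))                 # elect a sign per entry
    agree = (torch.sign(stacked) == sign) & (stacked != 0)
    summed = (stacked * agree).sum(dim=0)
    count = agree.sum(dim=0).clamp(min=1)
    return base + summed / count                          # disjoint mean of agreeing deltas

merged = ties_merge(torch.zeros(8), [torch.randn(8), torch.randn(8)])
print(merged)
```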
merge_bench2/logs/phi_ties_3.log
ADDED
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 03:04:34 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 03:04:36 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 03:04:36 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 03:04:43 [config.py:717] This model supports multiple tasks: {'classify', 'score', 'embed', 'generate', 'reward'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 03:04:43 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 03:04:43 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 03:04:44 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_ties_3', speculative_config=None, tokenizer='./merged1/phi_ties_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_ties_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 03:04:44 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 03:04:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_d31d3e21'), local_subscribe_addr='ipc:///tmp/0afa905c-696e-49e8-a9f7-ac072d7260b3', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 03:04:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14a9169e7f40>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:04:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7323f4c8'), local_subscribe_addr='ipc:///tmp/b5e77e84-7233-47ad-a3b6-8d6252a2b4cc', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 03:04:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14a914e90f70>
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:04:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a88afe59'), local_subscribe_addr='ipc:///tmp/26c773d3-abce-4d13-be0e-8fe86e22c3cd', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 03:04:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14a9169e7e50>
|
| 15 |
+
WARNING 07-06 03:04:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14a9169e6cb0>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:04:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cd7fc62f'), local_subscribe_addr='ipc:///tmp/3971f486-f21c-445a-a7c3-44978d3f6caf', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:04:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_19fec43b'), local_subscribe_addr='ipc:///tmp/143cc6f4-f292-4d83-8859-209200ba7f7e', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:04:46 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:04:46 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:04:46 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 21 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:04:46 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:04:46 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 23 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:04:47 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:04:47 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:04:47 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m WARNING 07-06 03:04:47 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m WARNING 07-06 03:04:47 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m WARNING 07-06 03:04:47 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m WARNING 07-06 03:04:47 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:04:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_50ef9bc5'), local_subscribe_addr='ipc:///tmp/6f846a5d-944e-4426-82c3-f5468e0c1c5d', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:04:47 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 32 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:04:47 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 33 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:04:47 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 34 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:04:47 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:04:47 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:04:47 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m WARNING 07-06 03:04:47 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m WARNING 07-06 03:04:47 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:04:47 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 40 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:04:47 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 41 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m WARNING 07-06 03:04:47 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m WARNING 07-06 03:04:47 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:04:47 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_3...
|
| 44 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:04:47 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_3...
|
| 45 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:04:47 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_3...
|
| 46 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:04:47 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_3...
|
| 47 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:05:00 [loader.py:458] Loading weights took 12.96 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:05:00 [loader.py:458] Loading weights took 13.03 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:05:00 [loader.py:458] Loading weights took 13.03 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:05:00 [loader.py:458] Loading weights took 12.99 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:05:01 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.192772 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:05:01 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.218936 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:05:01 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.219006 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:05:01 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.227179 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:05:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/01f46e37be/rank_2_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:05:06 [backends.py:430] Dynamo bytecode transform time: 5.57 s
|
| 57 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:05:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/01f46e37be/rank_3_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:05:06 [backends.py:430] Dynamo bytecode transform time: 5.63 s
|
| 59 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:05:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/01f46e37be/rank_0_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:05:06 [backends.py:430] Dynamo bytecode transform time: 5.72 s
|
| 61 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:05:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/01f46e37be/rank_1_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:05:06 [backends.py:430] Dynamo bytecode transform time: 5.73 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:05:11 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:05:11 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:05:11 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:05:11 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:05:32 [backends.py:148] Compiling a graph for general shape takes 25.47 s
|
| 68 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:05:32 [backends.py:148] Compiling a graph for general shape takes 25.39 s
|
| 69 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:05:34 [backends.py:148] Compiling a graph for general shape takes 26.36 s
|
| 70 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:05:34 [backends.py:148] Compiling a graph for general shape takes 26.38 s
|
| 71 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:05:56 [monitor.py:33] torch.compile takes 32.08 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:05:56 [monitor.py:33] torch.compile takes 32.10 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:05:56 [monitor.py:33] torch.compile takes 31.02 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:05:56 [monitor.py:33] torch.compile takes 31.04 s in total
|
| 75 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 03:05:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=3 pid=3949382)[0;0m INFO 07-06 03:06:44 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=2 pid=3949381)[0;0m INFO 07-06 03:06:44 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=1 pid=3949380)[0;0m INFO 07-06 03:06:44 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=0 pid=3949379)[0;0m INFO 07-06 03:06:44 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 03:06:44 [core.py:159] init engine (profile, create kv cache, warmup model) took 103.13 seconds
|
| 88 |
+
INFO 07-06 03:06:44 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 03:17:05 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 03:17:05 [__init__.py:239] Automatically detected platform cuda.
|
| 91 |
+
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.6482|± |0.0275|
| | |math_pass@1:1_samples|0.7766|± |0.0441|
|mm\|arc_challenge\|0| 0|sem |0.7480|± |0.0223|
|mm\|arc_easy\|0 | 0|sem |0.7223|± |0.0146|
|mm\|commonsenseqa\|0| 0|sem |0.5687|± |0.0277|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8031|± |0.0188|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7500|± |0.0693|
|mm\|truthfulqa\|0 | 0|sem |0.5537|± |0.0454|
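The Stderr column above is presumably the standard error of the mean over per-example scores; the log does not print the sample sizes, so the sketch below, which assumes binary per-example outcomes and a made-up count, is only meant to show how such a ± value is typically computed.

```python
import math

def mean_and_sem(scores):
    """Mean of per-example scores and its standard error (sample std / sqrt(n))."""
    n = len(scores)
    mean = sum(scores) / n
    var = sum((s - mean) ** 2 for s in scores) / (n - 1)  # unbiased sample variance
    return mean, math.sqrt(var / n)

# Hypothetical illustration: 30 correct answers out of 40 graded problems.
mean, sem = mean_and_sem([1.0] * 30 + [0.0] * 10)
print(f"{mean:.4f} ± {sem:.4f}")  # 0.7500 ± 0.0693
```

Incidentally, 30/40 reproduces the 0.7500 ± 0.0693 math_500 row above, which would be consistent with a 40-problem subset, though the log itself does not state the sample size.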
merge_bench2/logs/phi_ties_5.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 03:17:04 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 03:17:06 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 03:17:06 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 03:17:13 [config.py:717] This model supports multiple tasks: {'generate', 'classify', 'embed', 'reward', 'score'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 03:17:13 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 03:17:13 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 03:17:14 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_ties_5', speculative_config=None, tokenizer='./merged1/phi_ties_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_ties_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 03:17:14 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 03:17:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_1aeb2b19'), local_subscribe_addr='ipc:///tmp/4ce7181f-f696-469b-9d59-3a3ba0532656', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 03:17:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe7675ff70>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9d74a4bd'), local_subscribe_addr='ipc:///tmp/073167ed-a886-4c5b-b4f4-c881a91c7360', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 03:17:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe7675fe50>
|
| 13 |
+
WARNING 07-06 03:17:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe74e00eb0>
|
| 14 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2d718e9c'), local_subscribe_addr='ipc:///tmp/ee8533ef-5a1b-4248-83cf-03546f8b65f5', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 15 |
+
WARNING 07-06 03:17:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14fe7675fc40>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e0405c41'), local_subscribe_addr='ipc:///tmp/ac12da02-e53e-4cde-9df4-691838b52f65', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6a234fc3'), local_subscribe_addr='ipc:///tmp/17ff2b28-39ba-4f76-a062-f0ff35d75651', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:16 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:16 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:16 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 21 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:16 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:16 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 23 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:16 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:16 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:16 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m WARNING 07-06 03:17:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m WARNING 07-06 03:17:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m WARNING 07-06 03:17:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m WARNING 07-06 03:17:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:17 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_50f8e6b5'), local_subscribe_addr='ipc:///tmp/888e46f6-53e6-4877-aa38-ce9312e98b27', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:17 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 32 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:17 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 33 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:17 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 34 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:17 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 35 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m WARNING 07-06 03:17:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 38 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m WARNING 07-06 03:17:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 39 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 40 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 41 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m WARNING 07-06 03:17:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m WARNING 07-06 03:17:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:17 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_5...
|
| 44 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:17 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_5...
|
| 45 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:17 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_5...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:17 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_5...
|
| 47 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:29 [loader.py:458] Loading weights took 12.38 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:29 [loader.py:458] Loading weights took 12.42 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:30 [loader.py:458] Loading weights took 12.39 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:30 [loader.py:458] Loading weights took 12.40 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:30 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 12.566000 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:30 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 12.600693 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:30 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 12.608823 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:30 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 12.609282 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:36 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e1e28f285d/rank_3_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:36 [backends.py:430] Dynamo bytecode transform time: 5.58 s
|
| 57 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:36 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e1e28f285d/rank_1_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:36 [backends.py:430] Dynamo bytecode transform time: 5.62 s
|
| 59 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:36 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e1e28f285d/rank_2_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:36 [backends.py:430] Dynamo bytecode transform time: 5.65 s
|
| 61 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:36 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e1e28f285d/rank_0_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:36 [backends.py:430] Dynamo bytecode transform time: 5.95 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:17:40 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:17:40 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:17:40 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:17:41 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:18:01 [backends.py:148] Compiling a graph for general shape takes 25.19 s
|
| 68 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:18:02 [backends.py:148] Compiling a graph for general shape takes 25.32 s
|
| 69 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:18:02 [backends.py:148] Compiling a graph for general shape takes 25.61 s
|
| 70 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:18:03 [backends.py:148] Compiling a graph for general shape takes 26.19 s
|
| 71 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:18:25 [monitor.py:33] torch.compile takes 31.23 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:18:25 [monitor.py:33] torch.compile takes 32.14 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:18:25 [monitor.py:33] torch.compile takes 30.77 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:18:25 [monitor.py:33] torch.compile takes 30.96 s in total
|
| 75 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 03:18:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=3 pid=3953137)[0;0m INFO 07-06 03:19:11 [gpu_model_runner.py:1686] Graph capturing finished in 43 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=2 pid=3953135)[0;0m INFO 07-06 03:19:11 [gpu_model_runner.py:1686] Graph capturing finished in 43 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=1 pid=3953132)[0;0m INFO 07-06 03:19:11 [gpu_model_runner.py:1686] Graph capturing finished in 44 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=0 pid=3953131)[0;0m INFO 07-06 03:19:11 [gpu_model_runner.py:1686] Graph capturing finished in 44 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 03:19:11 [core.py:159] init engine (profile, create kv cache, warmup model) took 100.82 seconds
|
| 88 |
+
INFO 07-06 03:19:11 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 03:32:53 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 03:32:53 [__init__.py:239] Automatically detected platform cuda.
|
| 91 |
+
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.1473|± |0.0174|
| | |math_pass@1:1_samples|0.6414|± |0.0500|
|mm\|arc_challenge\|0| 0|sem |0.2310|± |0.0216|
|mm\|arc_easy\|0 | 0|sem |0.2397|± |0.0139|
|mm\|commonsenseqa\|0| 0|sem |0.0688|± |0.0142|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.6577|± |0.0225|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.6250|± |0.0775|
|mm\|truthfulqa\|0 | 0|sem |0.0496|± |0.0198|
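For quick manual spot checks of a merged checkpoint, the engine configuration recorded in the log above (model ./merged1/phi_ties_5, tensor_parallel_size=4, torch.float16, max_seq_len=2048, prefix caching enabled, vLLM v0.8.5.post1) maps onto vLLM's offline API roughly as follows. This is a minimal sketch, not the evaluation harness actually used for these numbers.

```python
from vllm import LLM, SamplingParams

# Mirror the engine settings reported in the log above.
llm = LLM(
    model="./merged1/phi_ties_5",
    tensor_parallel_size=4,
    dtype="float16",
    max_model_len=2048,
    enable_prefix_caching=True,
)

params = SamplingParams(temperature=0.0, max_tokens=256)
outputs = llm.generate(["Question: What is 17 * 24? Answer:"], params)
print(outputs[0].outputs[0].text)
```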
merge_bench2/logs/phi_ties_7.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 03:32:52 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 03:32:54 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 03:32:54 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 03:33:01 [config.py:717] This model supports multiple tasks: {'classify', 'reward', 'score', 'embed', 'generate'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 03:33:01 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 03:33:01 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 03:33:02 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_ties_7', speculative_config=None, tokenizer='./merged1/phi_ties_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_ties_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 03:33:02 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 03:33:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_ad4cd58b'), local_subscribe_addr='ipc:///tmp/67049ee1-4d29-4037-924e-033fe1a41620', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 03:33:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14901957ffa0>
|
| 11 |
+
WARNING 07-06 03:33:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x149017b69000>
|
| 12 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9c043181'), local_subscribe_addr='ipc:///tmp/8f23a03f-3e7d-4981-9614-8d9cc0c37b87', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c297e158'), local_subscribe_addr='ipc:///tmp/fcd2a874-c38a-4726-9afb-aef406a66e18', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 03:33:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14901957fee0>
|
| 15 |
+
WARNING 07-06 03:33:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x14901957ed40>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7e5799f8'), local_subscribe_addr='ipc:///tmp/bdda0205-fcb1-434a-ac34-24ab76467748', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_23f73f8f'), local_subscribe_addr='ipc:///tmp/226ccf98-16b4-4ddd-b866-edb4e0d4b583', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:04 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:04 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:04 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 21 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:04 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 22 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:04 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 23 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:04 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 24 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:04 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:04 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m WARNING 07-06 03:33:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m WARNING 07-06 03:33:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m WARNING 07-06 03:33:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m WARNING 07-06 03:33:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_b4576b69'), local_subscribe_addr='ipc:///tmp/7df58fd2-3ac2-46df-8fff-6ab5cd32e45f', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:05 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
|
| 32 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:05 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
|
| 33 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:05 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 34 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:05 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 35 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 36 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 37 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 38 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
|
| 39 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m WARNING 07-06 03:33:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 40 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m WARNING 07-06 03:33:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 41 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m WARNING 07-06 03:33:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 42 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m WARNING 07-06 03:33:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
|
| 43 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:05 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_7...
|
| 44 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:05 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_7...
|
| 45 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:05 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_7...
|
| 46 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:05 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_7...
|
| 47 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:18 [loader.py:458] Loading weights took 12.74 seconds
|
| 48 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:18 [loader.py:458] Loading weights took 12.86 seconds
|
| 49 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:18 [loader.py:458] Loading weights took 12.80 seconds
|
| 50 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:18 [loader.py:458] Loading weights took 12.86 seconds
|
| 51 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 12.988445 seconds
|
| 52 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.050820 seconds
|
| 53 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.055578 seconds
|
| 54 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 13.045876 seconds
|
| 55 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/efefaeffc9/rank_2_0 for vLLM's torch.compile
|
| 56 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:24 [backends.py:430] Dynamo bytecode transform time: 5.71 s
|
| 57 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/efefaeffc9/rank_0_0 for vLLM's torch.compile
|
| 58 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:24 [backends.py:430] Dynamo bytecode transform time: 5.76 s
|
| 59 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/efefaeffc9/rank_1_0 for vLLM's torch.compile
|
| 60 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:24 [backends.py:430] Dynamo bytecode transform time: 5.82 s
|
| 61 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/efefaeffc9/rank_3_0 for vLLM's torch.compile
|
| 62 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:24 [backends.py:430] Dynamo bytecode transform time: 5.83 s
|
| 63 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:28 [backends.py:136] Cache the graph of shape None for later use
|
| 64 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:29 [backends.py:136] Cache the graph of shape None for later use
|
| 65 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:29 [backends.py:136] Cache the graph of shape None for later use
|
| 66 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:29 [backends.py:136] Cache the graph of shape None for later use
|
| 67 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:33:50 [backends.py:148] Compiling a graph for general shape takes 25.48 s
|
| 68 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:33:50 [backends.py:148] Compiling a graph for general shape takes 25.64 s
|
| 69 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:33:51 [backends.py:148] Compiling a graph for general shape takes 25.89 s
|
| 70 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:33:51 [backends.py:148] Compiling a graph for general shape takes 26.11 s
|
| 71 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:34:13 [monitor.py:33] torch.compile takes 31.30 s in total
|
| 72 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:34:13 [monitor.py:33] torch.compile takes 31.72 s in total
|
| 73 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:34:13 [monitor.py:33] torch.compile takes 31.35 s in total
|
| 74 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:34:13 [monitor.py:33] torch.compile takes 31.87 s in total
|
| 75 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
|
| 76 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
|
| 77 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 78 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 79 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
|
| 80 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
|
| 81 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
|
| 82 |
+
INFO 07-06 03:34:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
|
| 83 |
+
[1;36m(VllmWorker rank=3 pid=3957275)[0;0m INFO 07-06 03:35:01 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 84 |
+
[1;36m(VllmWorker rank=2 pid=3957274)[0;0m INFO 07-06 03:35:01 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 85 |
+
[1;36m(VllmWorker rank=0 pid=3957272)[0;0m INFO 07-06 03:35:01 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 86 |
+
[1;36m(VllmWorker rank=1 pid=3957273)[0;0m INFO 07-06 03:35:01 [gpu_model_runner.py:1686] Graph capturing finished in 46 secs, took 3.00 GiB
|
| 87 |
+
INFO 07-06 03:35:01 [core.py:159] init engine (profile, create kv cache, warmup model) took 103.07 seconds
|
| 88 |
+
INFO 07-06 03:35:02 [core_client.py:439] Core engine process 0 ready.
|
| 89 |
+
INFO 07-06 03:49:02 [importing.py:53] Triton module has been replaced with a placeholder.
|
| 90 |
+
INFO 07-06 03:49:03 [__init__.py:239] Automatically detected platform cuda.
|
| 91 |
+
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
|all | |sem |0.0703|± |0.0127|
| | |math_pass@1:1_samples|0.4306|± |0.0513|
|mm\|arc_challenge\|0| 0|sem |0.0892|± |0.0146|
|mm\|arc_easy\|0 | 0|sem |0.1267|± |0.0108|
|mm\|commonsenseqa\|0| 0|sem |0.0406|± |0.0111|
|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.4362|± |0.0235|
|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.4250|± |0.0792|
|mm\|truthfulqa\|0 | 0|sem |0.0248|± |0.0142|
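Judging by the checkpoint names, the phi_ties_* models evaluated in these logs are TIES merges of the Phi-4 variants; the merge configs themselves are not part of this diff. Purely as an illustration of the TIES recipe (trim each task vector, elect a per-parameter majority sign, then average only the agreeing entries), a toy single-tensor sketch with a hypothetical ties_merge_tensor helper might look like:

```python
import torch

def ties_merge_tensor(base, finetuned, density=0.3):
    """Toy TIES merge for one weight tensor: trim -> elect sign -> disjoint mean."""
    trimmed = []
    for ft in finetuned:
        delta = ft - base                                  # task vector
        k = max(1, int(density * delta.numel()))
        cutoff = delta.abs().flatten().kthvalue(delta.numel() - k + 1).values
        trimmed.append(torch.where(delta.abs() >= cutoff, delta, torch.zeros_like(delta)))
    stacked = torch.stack(trimmed)
    sign = torch.sign(stacked.sum(dim=0))                  # majority sign by total magnitude
    keep = (torch.sign(stacked) == sign) & (stacked != 0)  # drop disagreeing entries
    merged = (stacked * keep).sum(dim=0) / keep.sum(dim=0).clamp(min=1)
    return base + merged
```

A real merge applies this per weight tensor across the whole checkpoint; the density knob controls how aggressively the task vectors are trimmed, which is presumably what the _3/_5/_7/_9 suffixes sweep here.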
merge_bench2/logs/phi_ties_9.log
ADDED
|
@@ -0,0 +1,101 @@
| 1 |
+
INFO 07-06 03:49:01 [__init__.py:239] Automatically detected platform cuda.
|
| 2 |
+
INFO 07-06 03:49:03 [config.py:209] Replacing legacy 'type' key with 'rope_type'
|
| 3 |
+
INFO 07-06 03:49:03 [config.py:2968] Downcasting torch.float32 to torch.float16.
|
| 4 |
+
INFO 07-06 03:49:10 [config.py:717] This model supports multiple tasks: {'reward', 'classify', 'score', 'embed', 'generate'}. Defaulting to 'generate'.
|
| 5 |
+
INFO 07-06 03:49:10 [config.py:1770] Defaulting to use mp for distributed inference
|
| 6 |
+
INFO 07-06 03:49:10 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
|
| 7 |
+
INFO 07-06 03:49:12 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged1/phi_ties_9', speculative_config=None, tokenizer='./merged1/phi_ties_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged1/phi_ties_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
|
| 8 |
+
WARNING 07-06 03:49:12 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
|
| 9 |
+
INFO 07-06 03:49:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_8eb9755e'), local_subscribe_addr='ipc:///tmp/4ca258b6-f8a8-44ca-bf8b-d992585ba541', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 10 |
+
WARNING 07-06 03:49:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x145b09d47f40>
|
| 11 |
+
[1;36m(VllmWorker rank=1 pid=3961373)[0;0m INFO 07-06 03:49:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e5264b0b'), local_subscribe_addr='ipc:///tmp/66a38d90-bc49-471b-8d4c-771ce84e7fd0', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 12 |
+
WARNING 07-06 03:49:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x145b083f0f70>
|
| 13 |
+
[1;36m(VllmWorker rank=0 pid=3961372)[0;0m INFO 07-06 03:49:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7409db1c'), local_subscribe_addr='ipc:///tmp/cf8c1197-f495-4f69-b20a-a9b7000240c1', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 14 |
+
WARNING 07-06 03:49:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x145b09d47e50>
|
| 15 |
+
WARNING 07-06 03:49:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in <vllm.v1.worker.gpu_worker.Worker object at 0x145b09d46cb0>
|
| 16 |
+
[1;36m(VllmWorker rank=2 pid=3961374)[0;0m INFO 07-06 03:49:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_85e11c6f'), local_subscribe_addr='ipc:///tmp/10c6487b-1b6d-47c0-bd39-46e7077402ec', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 17 |
+
[1;36m(VllmWorker rank=3 pid=3961375)[0;0m INFO 07-06 03:49:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_45e5b516'), local_subscribe_addr='ipc:///tmp/962acbba-247d-4cfc-9295-7c1ab9939ad4', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 18 |
+
[1;36m(VllmWorker rank=0 pid=3961372)[0;0m INFO 07-06 03:49:14 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 19 |
+
[1;36m(VllmWorker rank=1 pid=3961373)[0;0m INFO 07-06 03:49:14 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 20 |
+
[1;36m(VllmWorker rank=0 pid=3961372)[0;0m INFO 07-06 03:49:14 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 21 |
+
[1;36m(VllmWorker rank=1 pid=3961373)[0;0m INFO 07-06 03:49:14 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 22 |
+
[1;36m(VllmWorker rank=2 pid=3961374)[0;0m INFO 07-06 03:49:14 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 23 |
+
[1;36m(VllmWorker rank=3 pid=3961375)[0;0m INFO 07-06 03:49:14 [utils.py:1055] Found nccl from library libnccl.so.2
|
| 24 |
+
[1;36m(VllmWorker rank=2 pid=3961374)[0;0m INFO 07-06 03:49:14 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 25 |
+
[1;36m(VllmWorker rank=3 pid=3961375)[0;0m INFO 07-06 03:49:14 [pynccl.py:69] vLLM is using nccl==2.21.5
|
| 26 |
+
[1;36m(VllmWorker rank=3 pid=3961375)[0;0m WARNING 07-06 03:49:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 27 |
+
[1;36m(VllmWorker rank=2 pid=3961374)[0;0m WARNING 07-06 03:49:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 28 |
+
[1;36m(VllmWorker rank=0 pid=3961372)[0;0m WARNING 07-06 03:49:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 29 |
+
[1;36m(VllmWorker rank=1 pid=3961373)[0;0m WARNING 07-06 03:49:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
|
| 30 |
+
[1;36m(VllmWorker rank=0 pid=3961372)[0;0m INFO 07-06 03:49:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_d8b12038'), local_subscribe_addr='ipc:///tmp/f44c0bc1-9cbd-4a4a-b0a6-2b1efdf756cc', remote_subscribe_addr=None, remote_addr_ipv6=False)
|
| 31 |
+
[1;36m(VllmWorker rank=2 pid=3961374)[0;0m INFO 07-06 03:49:15 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
|
| 32 |
+
[1;36m(VllmWorker rank=3 pid=3961375)[0;0m INFO 07-06 03:49:15 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
|
| 33 |
+
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:15 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:15 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:15 [cuda.py:221] Using Flash Attention backend on V1 engine.
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:15 [cuda.py:221] Using Flash Attention backend on V1 engine.
+ (VllmWorker rank=2 pid=3961374) WARNING 07-06 03:49:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+ (VllmWorker rank=3 pid=3961375) WARNING 07-06 03:49:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:15 [cuda.py:221] Using Flash Attention backend on V1 engine.
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:15 [cuda.py:221] Using Flash Attention backend on V1 engine.
+ (VllmWorker rank=1 pid=3961373) WARNING 07-06 03:49:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+ (VllmWorker rank=0 pid=3961372) WARNING 07-06 03:49:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:15 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_9...
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:15 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_9...
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:15 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_9...
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:15 [gpu_model_runner.py:1329] Starting to load model ./merged1/phi_ties_9...
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:26 [loader.py:458] Loading weights took 11.58 seconds
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:26 [loader.py:458] Loading weights took 11.66 seconds
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:26 [loader.py:458] Loading weights took 11.63 seconds
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:26 [loader.py:458] Loading weights took 11.63 seconds
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:27 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.771326 seconds
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:27 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.845732 seconds
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:27 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.851462 seconds
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:27 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 11.840904 seconds
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:32 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1779764463/rank_3_0 for vLLM's torch.compile
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:32 [backends.py:430] Dynamo bytecode transform time: 5.53 s
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:33 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1779764463/rank_0_0 for vLLM's torch.compile
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:33 [backends.py:430] Dynamo bytecode transform time: 5.61 s
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:33 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1779764463/rank_2_0 for vLLM's torch.compile
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:33 [backends.py:430] Dynamo bytecode transform time: 5.66 s
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:33 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1779764463/rank_1_0 for vLLM's torch.compile
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:33 [backends.py:430] Dynamo bytecode transform time: 5.67 s
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:37 [backends.py:136] Cache the graph of shape None for later use
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:37 [backends.py:136] Cache the graph of shape None for later use
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:37 [backends.py:136] Cache the graph of shape None for later use
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:49:37 [backends.py:136] Cache the graph of shape None for later use
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:49:58 [backends.py:148] Compiling a graph for general shape takes 24.57 s
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:49:58 [backends.py:148] Compiling a graph for general shape takes 24.72 s
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:49:59 [backends.py:148] Compiling a graph for general shape takes 25.34 s
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:50:00 [backends.py:148] Compiling a graph for general shape takes 26.36 s
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:50:22 [monitor.py:33] torch.compile takes 30.38 s in total
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:50:22 [monitor.py:33] torch.compile takes 30.10 s in total
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:50:22 [monitor.py:33] torch.compile takes 30.95 s in total
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:50:22 [monitor.py:33] torch.compile takes 32.03 s in total
+ INFO 07-06 03:50:24 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
+ INFO 07-06 03:50:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
+ INFO 07-06 03:50:24 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
+ INFO 07-06 03:50:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
+ INFO 07-06 03:50:24 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
+ INFO 07-06 03:50:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
+ INFO 07-06 03:50:24 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
+ INFO 07-06 03:50:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
+ (VllmWorker rank=2 pid=3961374) INFO 07-06 03:51:06 [gpu_model_runner.py:1686] Graph capturing finished in 42 secs, took 3.00 GiB
+ (VllmWorker rank=3 pid=3961375) INFO 07-06 03:51:06 [gpu_model_runner.py:1686] Graph capturing finished in 42 secs, took 3.00 GiB
+ (VllmWorker rank=0 pid=3961372) INFO 07-06 03:51:07 [gpu_model_runner.py:1686] Graph capturing finished in 43 secs, took 3.00 GiB
+ (VllmWorker rank=1 pid=3961373) INFO 07-06 03:51:07 [gpu_model_runner.py:1686] Graph capturing finished in 43 secs, took 3.00 GiB
+ INFO 07-06 03:51:07 [core.py:159] init engine (profile, create kv cache, warmup model) took 99.76 seconds
+ INFO 07-06 03:51:07 [core_client.py:439] Core engine process 0 ready.
+ INFO 07-06 04:04:58 [importing.py:53] Triton module has been replaced with a placeholder.
+ INFO 07-06 04:04:58 [__init__.py:239] Automatically detected platform cuda.
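A quick sanity check on the kv_cache_utils lines above: the reported "maximum concurrency" is simply the per-rank KV-cache token budget divided by the 2,048-token request length. This is an interpretation of the log, not something stated in it, but the numbers line up:

```python
# Hedged check: concurrency ~= KV-cache tokens / tokens per request (assumed relation).
for kv_tokens in (1_999_536, 1_999_280, 2_000_560):
    print(f"{kv_tokens / 2_048:.2f}x")  # -> 976.34x, 976.21x, 976.84x, matching the log
```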
+ | Task |Version| Metric |Value | |Stderr|
+ |------------------|------:|---------------------|-----:|---|-----:|
+ |all | |sem |0.0362|± |0.0094|
+ | | |math_pass@1:1_samples|0.3202|± |0.0485|
+ |mm\|arc_challenge\|0| 0|sem |0.0577|± |0.0120|
+ |mm\|arc_easy\|0 | 0|sem |0.0581|± |0.0076|
+ |mm\|commonsenseqa\|0| 0|sem |0.0125|± |0.0062|
+ |mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.3154|± |0.0220|
+ |mm\|math_500\|0 | 3|math_pass@1:1_samples|0.3250|± |0.0750|
+ |mm\|truthfulqa\|0 | 0|sem |0.0165|± |0.0116|
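The log above, from engine startup through the results table, comes from evaluating the merged checkpoint `./merged1/phi_ties_9` served with vLLM across four tensor-parallel workers. As a minimal, hypothetical sketch only (not the exact harness invocation used for this run), the engine setup implied by the log could be reproduced with vLLM's offline Python API roughly as follows; the model path and tensor-parallel degree are taken from the log, the 2,048-token limit mirrors the per-request figure, and the prompt is purely illustrative.

```python
# Hedged sketch of the vLLM setup implied by the log; assumptions noted inline.
from vllm import LLM, SamplingParams

llm = LLM(
    model="./merged1/phi_ties_9",  # merged checkpoint path from the log
    tensor_parallel_size=4,        # four VllmWorker ranks (TP rank 0-3) in the log
    max_model_len=2048,            # matches "2,048 tokens per request" (assumed setting)
)

params = SamplingParams(temperature=0.0, max_tokens=256)
outputs = llm.generate(["What is 17 * 23?"], params)
print(outputs[0].outputs[0].text)
```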
merge_bench2/outputs/._merged1_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1b5828617e531f3a6ac6ce5fbc09c0a67421d1ce7fc49e6c013ba14ef8718bc
+ size 3281478
merge_bench2/outputs/._merged1_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:090c0e98ed333b292c484b5474942b04f5486d287d76e66c9e6b233936288c46
+ size 7643761
merge_bench2/outputs/._merged1_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7db288d0c8962156958fa9d00fffd8b13ec27be1035ebd59b44e1186de2eb6e2
+ size 2650237
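The three-line entries above are git-lfs pointer files (spec version, object hash, byte size), so the actual per-sample outputs live in LFS and have to be fetched before they can be opened. A rough sketch of inspecting one of them, assuming the LFS object has been materialized (e.g. via `git lfs pull`) and that pandas with a parquet engine such as pyarrow is installed; the column schema is produced by the evaluation harness and is not assumed here.

```python
# Hedged sketch: open one of the added per-task output files once its LFS object is fetched.
import pandas as pd

# Path copied from the diff entry above; the `|` characters are part of the filename.
path = (
    "merge_bench2/outputs/._merged1_phi_darelinear_1/2025-06-23T01-52-10.258150/"
    "outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet"
)

df = pd.read_parquet(path)
print(df.shape)    # number of evaluated examples x columns
print(df.columns)  # schema comes from the harness; check it rather than assuming names
print(df.head())
```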