diff --git a/eval/0623_32k/logs/Llama3-8B.log b/eval/0623_32k/logs/Llama3-8B_base.log
similarity index 100%
rename from eval/0623_32k/logs/Llama3-8B.log
rename to eval/0623_32k/logs/Llama3-8B_base.log
diff --git a/eval/0623_32k/logs/Qwen2.5-14B.log b/eval/0623_32k/logs/Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..1e0914f7a00bbcab050fd468a5eeed1dd3f40875
--- /dev/null
+++ b/eval/0623_32k/logs/Qwen2.5-14B.log
@@ -0,0 +1,17 @@
+INFO 07-09 03:02:46 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 03:02:46 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 04:06:17 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 04:06:18 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |math_pass@1:1_samples|0.5467|± |0.0252|
+| | |sem |0.4805|± |0.0176|
+|mm\|aime24\|0 | 3|math_pass@1:1_samples|0.0667|± |0.0463|
+|mm\|arc_challenge\|0| 0|sem |0.5939|± |0.0144|
+|mm\|arc_easy\|0 | 0|sem |0.6103|± |0.0100|
+|mm\|commonsenseqa\|0| 0|sem |0.6233|± |0.0139|
+|mm\|gpqa_diamond\|0 | 2|sem |0.2828|± |0.0321|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8954|± |0.0084|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.6780|± |0.0209|
+|mm\|truthfulqa\|0 | 0|sem |0.2920|± |0.0178|
+
diff --git a/eval/0623_32k/logs/R1-Qwen2.5-14B.log b/eval/0623_32k/logs/R1-Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..e479f709b36e6249e86fbc5447136881dc2a0d23
--- /dev/null
+++ b/eval/0623_32k/logs/R1-Qwen2.5-14B.log
@@ -0,0 +1,17 @@
+INFO 07-09 04:07:36 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 04:07:36 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 08:42:57 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 08:42:57 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.7636|± |0.0149|
+| | |math_pass@1:1_samples|0.8314|± |0.0360|
+|mm\|aime24\|0 | 3|math_pass@1:1_samples|0.6333|± |0.0895|
+|mm\|arc_challenge\|0| 0|sem |0.9326|± |0.0073|
+|mm\|arc_easy\|0 | 0|sem |0.9726|± |0.0033|
+|mm\|commonsenseqa\|0| 0|sem |0.7952|± |0.0116|
+|mm\|gpqa_diamond\|0 | 2|sem |0.3990|± |0.0349|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9348|± |0.0068|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9260|± |0.0117|
+|mm\|truthfulqa\|0 | 0|sem |0.7187|± |0.0176|
+
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7f2c7b02af0436ccdfd18f190bf9c25e7517a6cd
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1e55a3e2c45a1d4e0392694cbe090e0cc5f1988e618951aa0948da92b476fff
+size 3972288
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c97e27164738341d89924e75ac096a7252321729
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d1770ea1ff738146d419660ee2a627d71d7d27bcb4d96c71eaaef4353388c48
+size 425585
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..11651489b3b01c475b19f429d5aa707979dd20ae
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f676a8a5d9985a4a92a14e1cb209be43afa61cbba3513e74f85bdecaa9d8acd
+size 740601
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d226c46c906785b8838b9883a0be0aec3ee4ddf9
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af59eb811818d4590f6c5c6f0d5d7bb2762c4aaa3eb44cdd2c721795c97cdf6c
+size 286512
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b160add4168942a9958bc1821ad5edf6379bcb8a
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53725ee7d7ab39d1f69a1f817f79815a7f686ecd70b6e8104c85d5f9c6c47830
+size 166739
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..da97890077478b52bb5f49be9c3e185055f07477
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:819a97466693e2ddc8b9bbda94012ed8a33e8c7535a29612eb9ebaeb73b953f5
+size 3210094
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..57c496c0e4e2648cc7f34f49919168a05a7e5308
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d126d021f122cd7f710c6bc456ac60798b74f26eec5856f0e34d1358ee52552c
+size 11227504
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f86bc9543b831de3271045ac45c01b8ce6916cf3
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a15ce77da42d90b664fbeea537cf2d012ed520d351db2847aeb972c6f61656a
+size 234273
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|aime24|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|aime24|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d8887e1f5e30a21ffa7793600aca92b09be5978b
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|aime24|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01375969189bf887d2a4e3b3cda117e41ab55c89e4730b1642ac9af3f3cfdd84
+size 1041990
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|arc_challenge|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|arc_challenge|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5c8f1386ba35dbb5239516150de06bcf575f5760
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|arc_challenge|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28d16968dbdb6ebf67942b5e735b8e3cbad1e1e1db4d7414143a790ab1bbe873
+size 5522508
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|arc_easy|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|arc_easy|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3da8cb847346aa49487497693c6956e991a7ba64
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|arc_easy|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4bc9edd8d851e3c0bf2581800c82c47b351d20002d4edb84a5d76b0d5ff1e88
+size 10500507
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|commonsenseqa|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|commonsenseqa|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..654cfe2c29eb3f775165aed7f30d7a2ea8b43dda
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|commonsenseqa|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:220686cba5ea12d7a360093209a507139d65a9ddb4753e3cd44550b24e7f446c
+size 3991832
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|gpqa_diamond|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|gpqa_diamond|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..aea20973b071f489004abebae55c322dda7fea3a
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|gpqa_diamond|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b560749d0cca261c15b5619a47c8c31396ee2debc8d370f3740a9ead4f414923
+size 4002615
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|gsm8k|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|gsm8k|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..93983684a61bffa26c772d6ec3f4fb287a2f86f2
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|gsm8k|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:112feb534470c91dc9de97f4c9695e22cb546a5b45c5c26d4b42febe383a7ac9
+size 7145704
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|math_500|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|math_500|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..03c56988d7b22b9d6ebf33ccf9dc1deaf70aa7b0
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|math_500|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41b4731b51412cc56b1fbcd011231d4dd625bd9bcf8f0bd1f65fe2bf4b0acf30
+size 9269723
diff --git a/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|truthfulqa|0_2025-07-09T03-02-48.058238.parquet b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|truthfulqa|0_2025-07-09T03-02-48.058238.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..465c52f03018fc69bb6f08fd8d5ff434ee1a96f4
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_Qwen2.5-14B/2025-07-09T03-02-48.058238/outputs_mm|truthfulqa|0_2025-07-09T03-02-48.058238.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87da79518d06e86df4fb8bb6122bd0e1f1cf16e02bd8ffd246e618121cd1677a
+size 3163979
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..fe59683d168d60de98db750fdd7950d85bfab1e4
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:420f98d07475128cb910cc00b193563f4941e8f44849eaac112d4f4caebf5aa8
+size 4757253
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c1359b15217eb7eef97286bde8bcc69dfca82ae0
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6176953957f8f8012b3b211a1a4def3452c2d42005d4c8372f6a631460c5edd1
+size 33069846
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c01eb1730b9fa557431ea6800996ad98728356de
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:883bf9772aa136be838e6346006b4884d294616b710d62f9733fccbd972cbf4a
+size 44353064
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b4bc51dba42f0b13819b5338095b8aa0ff78cb95
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8b40e26f15601f35e1d97e18cb15f43d27035ee3070605637210becaf40ba3c
+size 57995814
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a93cfc09796b46202b23911c67a97edaf357f3ca
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ec61ac0618d91a83a0b194d143ada9926445fb0e65e59955c454ba206b96285
+size 41736491
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..20cbf7ee5fa8f90d714b4c4174c7c9df1702aa4a
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c2a9e7c64bd0b0521e29748b5404830020d0cc0f42e7a13c946b85e3d52c8cb
+size 23294581
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3e858c8ae78099232da1a7fde952e2f42aa0c232
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f79ba8eaa3d0210b914bc42fd8e3b8cce6de9b32bfae294e134a8dd80a210f9
+size 23324916
diff --git a/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..c804f2455ed700feaa30109f09f720c88dc125c6
--- /dev/null
+++ b/eval/0623_32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2704e31f2ff48f8a29df5d1c2d9ee9760038972f3214884e349d9f76524531be
+size 38574143
diff --git a/eval/0623_32k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/eval/0623_32k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json
new file mode 100644
index 0000000000000000000000000000000000000000..3b0c69dcf4de4a225987e12bc40147eb03bfedb1
--- /dev/null
+++ b/eval/0623_32k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json
@@ -0,0 +1,89 @@
+{
+    "results": {
+        "mm|arc_easy_c|0": {
+            "em": 0.9781144781144782,
+            "em_stderr": 0.0030022129253931924,
+            "qem": 0.9781144781144782,
+            "qem_stderr": 0.0030022129253931924,
+            "pem": 0.9781144781144782,
+            "pem_stderr": 0.0030022129253931924,
+            "pqem": 0.9823232323232324,
+            "pqem_stderr": 0.002703938613540887
+        },
+        "mm|aime24_c|0": {
+            "math_pass@1:1_samples": 0.0,
+            "math_pass@1:1_samples_stderr": 0.0
+        },
+        "mm|gpqa_diamond_c|0": {
+            "em": 0.398989898989899,
+            "em_stderr": 0.03488901616852732,
+            "qem": 0.398989898989899,
+            "qem_stderr": 0.03488901616852732,
+            "pem": 0.40404040404040403,
+            "pem_stderr": 0.03496130972056128,
+            "pqem": 0.5454545454545454,
+            "pqem_stderr": 0.03547601494006938
+        },
+        "mm|truthfulqa_c|0": {
+            "em": 0.6957186544342507,
+            "em_stderr": 0.018005197833249827,
+            "qem": 0.6957186544342507,
+            "qem_stderr": 0.018005197833249827,
+            "pem": 0.6957186544342507,
+            "pem_stderr": 0.018005197833249827,
+            "pqem": 0.753822629969419,
+            "pqem_stderr": 0.016857845089599713
+        },
+        "mm|arc_challenge_c|0": {
+            "em": 0.9385665529010239,
+            "em_stderr": 0.007017081676091902,
+            "qem": 0.9385665529010239,
+            "qem_stderr": 0.007017081676091902,
+            "pem": 0.9385665529010239,
+            "pem_stderr": 0.007017081676091902,
+            "pqem": 0.9564846416382252,
+            "pqem_stderr": 0.005961860846687841
+        },
+        "mm|math_500_c|0": {
+            "math_pass@1:1_samples": 0.5,
+            "math_pass@1:1_samples_stderr": 0.022383074051792257
+        },
+        "mm|commonsenseqa_c|0": {
+            "em": 0.8402948402948403,
+            "em_stderr": 0.01048806588260968,
+            "qem": 0.8402948402948403,
+            "qem_stderr": 0.01048806588260968,
+            "pem": 0.8402948402948403,
+            "pem_stderr": 0.01048806588260968,
+            "pqem": 0.8632268632268633,
+            "pqem_stderr": 0.009837459597699643
+        },
+        "mm|gsm8k_c|0": {
+            "math_pass@1:1_samples": 0.844579226686884,
+            "math_pass@1:1_samples_stderr": 0.009979689409499148
+        },
+        "all": {
+            "em": 0.7703368849468983,
+            "em_stderr": 0.014680314897174385,
+            "qem": 0.7703368849468983,
+            "qem_stderr": 0.014680314897174385,
+            "pem": 0.7713469859569994,
+            "pem_stderr": 0.014694773607581178,
+            "pqem": 0.8202623825224571,
+            "pqem_stderr": 0.014167423817519492,
+            "math_pass@1:1_samples": 0.44819307556229465,
+            "math_pass@1:1_samples_stderr": 0.010787587820430468
+        }
+    },
+    "versions": {
+        "mm|aime24_c|0": 3,
+        "mm|arc_challenge_c|0": 0,
+        "mm|arc_easy_c|0": 0,
+        "mm|commonsenseqa_c|0": 0,
+        "mm|gpqa_diamond_c|0": 1,
+        "mm|gsm8k_c|0": 0,
+        "mm|math_500_c|0": 3,
+        "mm|mmlu_pro_c|0": 0,
+        "mm|truthfulqa_c|0": 0
+    }
+}
\ No newline at end of file
diff --git a/eval/0623_32k/results/._models_Qwen2.5-14B/results_2025-07-09T03-02-48.058238.json b/eval/0623_32k/results/._models_Qwen2.5-14B/results_2025-07-09T03-02-48.058238.json
new file mode 100644
index 0000000000000000000000000000000000000000..cccdd194e743c84393289ad399ad1f333f7e0e99
--- /dev/null
+++ b/eval/0623_32k/results/._models_Qwen2.5-14B/results_2025-07-09T03-02-48.058238.json
@@ -0,0 +1,63 @@
+{
+    "results": {
+        "mm|aime24|0": {
+            "math_pass@1:1_samples": 0.06666666666666667,
+            "math_pass@1:1_samples_stderr": 0.046320555585310084
+        },
+        "mm|math_500|0": {
+            "math_pass@1:1_samples": 0.678,
+            "math_pass@1:1_samples_stderr": 0.02091666833001988
+        },
+        "mm|truthfulqa|0": {
+            "sem": 0.29204892966360857,
+            "sem_stderr": 0.0177939694322578
+        },
+        "mm|arc_easy|0": {
+            "sem": 0.6102693602693603,
+            "sem_stderr": 0.010007169391797055
+        },
+        "mm|gsm8k|0": {
+            "math_pass@1:1_samples": 0.8953752843062927,
+            "math_pass@1:1_samples_stderr": 0.008430668082029257
+        },
+        "mm|arc_challenge|0": {
+            "sem": 0.5938566552901023,
+            "sem_stderr": 0.014351656690097863
+        },
+        "mm|commonsenseqa|0": {
+            "sem": 0.6232596232596233,
+            "sem_stderr": 0.013873168621535361
+        },
+        "mm|gpqa_diamond|0": {
+            "sem": 0.2828282828282828,
+            "sem_stderr": 0.03208779558786751
+        },
+        "all": {
+            "math_pass@1:1_samples": 0.5466806503243198,
+            "math_pass@1:1_samples_stderr": 0.025222630665786408,
+            "sem": 0.4804525702621955,
+            "sem_stderr": 0.017622751944711117
+        }
+    },
+    "versions": {
+        "mm|aime24|0": 3,
+        "mm|arc_challenge|0": 0,
+        "mm|arc_easy|0": 0,
+        "mm|commonsenseqa|0": 0,
+        "mm|gpqa_diamond|0": 2,
+        "mm|gsm8k|0": 0,
+        "mm|math_500|0": 3,
+        "mm|mmlu_pro|0": 0,
+        "mm|truthfulqa|0": 0
+    },
+    "size": {
+        "mm|aime24|0": 30,
+        "mm|math_500|0": 500,
+        "mm|truthfulqa|0": 654,
+        "mm|arc_easy|0": 2376,
+        "mm|gsm8k|0": 1319,
+        "mm|arc_challenge|0": 1172,
+        "mm|commonsenseqa|0": 1221,
+        "mm|gpqa_diamond|0": 198
+    }
+}
\ No newline at end of file
diff --git a/eval/0623_32k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json b/eval/0623_32k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json
new file mode 100644
index 0000000000000000000000000000000000000000..235e39bf5aeec52213ffbbe6b3f7d0ad9a3d2b50
--- /dev/null
+++ b/eval/0623_32k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json
@@ -0,0 +1,63 @@
+{
+    "results": {
+        "mm|arc_challenge|0": {
+            "sem": 0.9325938566552902,
+            "sem_stderr": 0.00732685450327276
+        },
+        "mm|aime24|0": {
+            "math_pass@1:1_samples": 0.6333333333333333,
+            "math_pass@1:1_samples_stderr": 0.08948554539839962
+        },
+        "mm|truthfulqa|0": {
+            "sem": 0.7186544342507645,
+            "sem_stderr": 0.017596386862110066
+        },
+        "mm|arc_easy|0": {
+            "sem": 0.9726430976430976,
+            "sem_stderr": 0.0033471749125014603
+        },
+        "mm|commonsenseqa|0": {
+            "sem": 0.7952497952497952,
+            "sem_stderr": 0.011552714477876674
+        },
+        "mm|math_500|0": {
+            "math_pass@1:1_samples": 0.926,
+            "math_pass@1:1_samples_stderr": 0.011718474529160436
+        },
+        "mm|gsm8k|0": {
+            "math_pass@1:1_samples": 0.9347990902198635,
+            "math_pass@1:1_samples_stderr": 0.006800302989321107
+        },
+        "mm|gpqa_diamond|0": {
+            "sem": 0.398989898989899,
+            "sem_stderr": 0.03488901616852732
+        },
+        "all": {
+            "sem": 0.7636262165577692,
+            "sem_stderr": 0.014942429384857658,
+            "math_pass@1:1_samples": 0.8313774745177324,
+            "math_pass@1:1_samples_stderr": 0.03600144097229372
+        }
+    },
+    "versions": {
+        "mm|aime24|0": 3,
+        "mm|arc_challenge|0": 0,
+        "mm|arc_easy|0": 0,
+        "mm|commonsenseqa|0": 0,
+        "mm|gpqa_diamond|0": 2,
+        "mm|gsm8k|0": 0,
+        "mm|math_500|0": 3,
+        "mm|mmlu_pro|0": 0,
+        "mm|truthfulqa|0": 0
+    },
+    "size": {
+        "mm|arc_challenge|0": 1172,
+        "mm|aime24|0": 30,
+        "mm|truthfulqa|0": 654,
+        "mm|arc_easy|0": 2376,
+        "mm|commonsenseqa|0": 1221,
+        "mm|math_500|0": 500,
+        "mm|gsm8k|0": 1319,
+        "mm|gpqa_diamond|0": 198
+    }
+}
\ No newline at end of file
diff --git a/merge_llama/logs/llama_dare_linear_1.log b/merge_llama/logs/llama_dare_linear_1.log
index 528d5bff145acf0aad1437e5c4f15a36ac407c4c..903b6ab924ec2832fbcea5098825d4539057adad 100644
--- a/merge_llama/logs/llama_dare_linear_1.log
+++ b/merge_llama/logs/llama_dare_linear_1.log
@@ -1,91 +1,91 @@
-INFO 07-08 21:57:07 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-08 21:57:17 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'generate', 'reward', 'classify'}. Defaulting to 'generate'.
-INFO 07-08 21:57:17 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-08 21:57:17 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-08 21:57:18 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_dare_linear_1', speculative_config=None, tokenizer='./merged2/llama_dare_linear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_dare_linear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-08 21:57:18 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-08 21:57:18 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_efa86c3a'), local_subscribe_addr='ipc:///tmp/427f2a48-5877-4905-9925-0c2b4d17785e', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-08 21:57:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0f005f40'), local_subscribe_addr='ipc:///tmp/80d4e5ea-4c66-4c15-b8ef-f7c70aaa4da2', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-08 21:57:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-WARNING 07-08 21:57:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-WARNING 07-08 21:57:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8e721de3'), local_subscribe_addr='ipc:///tmp/320e5e0d-55dd-4357-b9de-5cde0f6c36f7', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a0b7a43c'), local_subscribe_addr='ipc:///tmp/cfc24e0a-21eb-417d-b7d7-18d8486c08b9', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_51583d04'), local_subscribe_addr='ipc:///tmp/b246fe40-1357-44f1-a6a9-35733606dbe4', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:26 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:26 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:26 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:26 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:26 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:26 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:26 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:26 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=464442) WARNING 07-08 21:57:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=2 pid=464441) WARNING 07-08 21:57:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=1 pid=464440) WARNING 07-08 21:57:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=464439) WARNING 07-08 21:57:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_ed261c14'), local_subscribe_addr='ipc:///tmp/5fb92e36-f741-4d3d-b123-c5965ff43fe9', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:27 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:27 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:27 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:27 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=464442) WARNING 07-08 21:57:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=1 pid=464440) WARNING 07-08 21:57:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=2 pid=464441) WARNING 07-08 21:57:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=464439) WARNING 07-08 21:57:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_1...
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_1...
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_1...
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_1...
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:42 [loader.py:458] Loading weights took 15.47 seconds
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:42 [loader.py:458] Loading weights took 15.49 seconds
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:42 [loader.py:458] Loading weights took 15.48 seconds
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:42 [loader.py:458] Loading weights took 15.50 seconds
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:43 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.799646 seconds
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:43 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.797310 seconds
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:43 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.818653 seconds
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:43 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.782109 seconds
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:50 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c50dca8b5e/rank_3_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:50 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c50dca8b5e/rank_0_0 for vLLM's torch.compile
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:50 [backends.py:430] Dynamo bytecode transform time: 7.03 s
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:50 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c50dca8b5e/rank_2_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:50 [backends.py:430] Dynamo bytecode transform time: 7.03 s
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:50 [backends.py:430] Dynamo bytecode transform time: 7.03 s
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:50 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c50dca8b5e/rank_1_0 for vLLM's torch.compile
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:50 [backends.py:430] Dynamo bytecode transform time: 7.03 s
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:57:53 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:57:53 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:57:53 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:57:53 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:58:15 [backends.py:148] Compiling a graph for general shape takes 24.81 s
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:58:15 [backends.py:148] Compiling a graph for general shape takes 24.71 s
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:58:15 [backends.py:148] Compiling a graph for general shape takes 24.88 s
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:58:16 [backends.py:148] Compiling a graph for general shape takes 25.29 s
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:58:29 [monitor.py:33] torch.compile takes 31.91 s in total
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:58:29 [monitor.py:33] torch.compile takes 32.32 s in total
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:58:29 [monitor.py:33] torch.compile takes 31.74 s in total
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:58:29 [monitor.py:33] torch.compile takes 31.84 s in total
-INFO 07-08 21:58:30 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
-INFO 07-08 21:58:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
-INFO 07-08 21:58:30 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-08 21:58:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-08 21:58:30 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-08 21:58:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-08 21:58:30 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
-INFO 07-08 21:58:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
-(VllmWorker rank=3 pid=464442) INFO 07-08 21:59:02 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 2.44 GiB
-(VllmWorker rank=2 pid=464441) INFO 07-08 21:59:02 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 2.44 GiB
-(VllmWorker rank=0 pid=464439) INFO 07-08 21:59:02 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 2.44 GiB
-(VllmWorker rank=1 pid=464440) INFO 07-08 21:59:02 [gpu_model_runner.py:1686] Graph capturing finished in 32 secs, took 2.44 GiB
-INFO 07-08 21:59:02 [core.py:159] init engine (profile, create kv cache, warmup model) took 79.81 seconds
-INFO 07-08 21:59:03 [core_client.py:439] Core engine process 0 ready.
-INFO 07-08 22:10:10 [importing.py:53] Triton module has been replaced with a placeholder.
-INFO 07-08 22:10:10 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 16:08:37 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 16:08:46 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'embed', 'generate', 'classify'}. Defaulting to 'generate'.
+INFO 07-09 16:08:46 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 16:08:46 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 16:08:47 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_dare_linear_1', speculative_config=None, tokenizer='./merged_llama/llama_dare_linear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_dare_linear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 16:08:47 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 16:08:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_ca112eda'), local_subscribe_addr='ipc:///tmp/344936ee-be73-4410-94fe-a17cf6cb2940', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:08:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:08:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0814f70e'), local_subscribe_addr='ipc:///tmp/b533fae6-c563-4df2-81af-f47351ef77d8', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:08:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:08:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4d0f3779'), local_subscribe_addr='ipc:///tmp/6f15b09a-e194-4140-a122-e2f5de35c57e', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:08:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 16:08:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:08:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_143d3603'), local_subscribe_addr='ipc:///tmp/f102b9bc-2963-4a55-88ea-4520368ca3bc', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:08:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_55386cce'), local_subscribe_addr='ipc:///tmp/14bb196f-c54d-4d60-9243-e7ce7b6a6239', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:08:49 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:08:49 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:08:49 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:08:49 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:08:49 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:08:49 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:08:49 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:08:49 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=833526) WARNING 07-09 16:08:50 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=833525) WARNING 07-09 16:08:50 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=833520) WARNING 07-09 16:08:50 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=833517) WARNING 07-09 16:08:50 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:08:50 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_7839edaa'), local_subscribe_addr='ipc:///tmp/b04ab57c-58c0-47b6-a6fe-9caab659c14d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:08:50 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:08:50 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:08:50 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:08:50 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:08:50 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=833526) WARNING 07-09 16:08:50 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:08:50 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:08:50 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:08:50 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=833525) WARNING 07-09 16:08:50 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=833517) WARNING 07-09 16:08:50 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=833520) WARNING 07-09 16:08:50 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:08:50 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_1...
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:08:50 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_1...
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:08:50 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_1...
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:08:50 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_1...
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:09:04 [loader.py:458] Loading weights took 13.46 seconds
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:09:04 [loader.py:458] Loading weights took 13.55 seconds
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:09:04 [loader.py:458] Loading weights took 13.59 seconds
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:09:04 [loader.py:458] Loading weights took 13.55 seconds
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:09:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.715835 seconds
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:09:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.818319 seconds
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:09:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.810882 seconds
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:09:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.817955 seconds
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:09:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac0148a4f7/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:09:11 [backends.py:430] Dynamo bytecode transform time: 6.39 s
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:09:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac0148a4f7/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:09:11 [backends.py:430] Dynamo bytecode transform time: 6.41 s
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:09:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac0148a4f7/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:09:11 [backends.py:430] Dynamo bytecode transform time: 6.50 s
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:09:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac0148a4f7/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:09:11 [backends.py:430] Dynamo bytecode transform time: 6.54 s
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:09:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:09:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:09:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:09:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:09:35 [backends.py:148] Compiling a graph for general shape takes 24.17 s
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:09:36 [backends.py:148] Compiling a graph for general shape takes 24.34 s
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:09:36 [backends.py:148] Compiling a graph for general shape takes 24.19 s
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:09:36 [backends.py:148] Compiling a graph for general shape takes 24.41 s
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:09:49 [monitor.py:33] torch.compile takes 30.95 s in total
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:09:49 [monitor.py:33] torch.compile takes 30.58 s in total
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:09:49 [monitor.py:33] torch.compile takes 30.73 s in total
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:09:49 [monitor.py:33] torch.compile takes 30.70 s in total
+INFO 07-09 16:09:50 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
+INFO 07-09 16:09:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 16:09:50 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 16:09:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 16:09:50 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 16:09:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 16:09:50 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 16:09:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=2 pid=833525) INFO 07-09 16:10:20 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=0 pid=833517) INFO 07-09 16:10:20 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=1 pid=833520) INFO 07-09 16:10:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
+(VllmWorker rank=3 pid=833526) INFO 07-09 16:10:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
+INFO 07-09 16:10:20 [core.py:159] init engine (profile, create kv cache, warmup model) took 75.65 seconds
+INFO 07-09 16:10:20 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 16:21:28 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:21:28 [__init__.py:239] Automatically detected platform cuda.
 | Task |Version| Metric |Value| |Stderr|
 |------------------|------:|---------------------|----:|---|-----:|
 |all | |math_pass@1:1_samples| 0|± | 0|
diff --git a/merge_llama/logs/llama_dare_linear_3.log b/merge_llama/logs/llama_dare_linear_3.log
index d3886d6abe61eb29192e6fdf00e19c1f961255a3..3b05ceac338a1d91b95b5f20c12e7a8b6da325ad 100644
--- a/merge_llama/logs/llama_dare_linear_3.log
+++ b/merge_llama/logs/llama_dare_linear_3.log
@@ -1,91 +1,91 @@
-INFO 07-08 22:10:09 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-08 22:10:17 [config.py:717] This model supports multiple tasks: {'generate', 'score', 'embed', 'classify', 'reward'}. Defaulting to 'generate'.
-INFO 07-08 22:10:17 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-08 22:10:17 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-08 22:10:19 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_dare_linear_3', speculative_config=None, tokenizer='./merged2/llama_dare_linear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_dare_linear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-08 22:10:19 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-08 22:10:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_1555f65d'), local_subscribe_addr='ipc:///tmp/6e528593-82c3-4270-944f-a35f93f9de59', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-08 22:10:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_219c95bf'), local_subscribe_addr='ipc:///tmp/efd0d5d1-e040-42c6-b9a2-313bba811e51', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-08 22:10:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8726dac0'), local_subscribe_addr='ipc:///tmp/8d3af5b5-b3b4-4297-9863-eb3f56a06ad9', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-08 22:10:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-WARNING 07-08 22:10:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1223b949'), local_subscribe_addr='ipc:///tmp/6bc83f99-8054-4666-96eb-c76bef750739', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b6481fc7'), local_subscribe_addr='ipc:///tmp/73405b47-4cb4-488c-847e-40e382f91067', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:21 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:21 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:21 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:21 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:22 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:22 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:22 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:22 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=469317) WARNING 07-08 22:10:22 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=3 pid=469318) WARNING 07-08 22:10:22 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=1 pid=469316) WARNING 07-08 22:10:22 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=469315) WARNING 07-08 22:10:22 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_ccf24d71'), local_subscribe_addr='ipc:///tmp/a6ef8cea-1071-4abf-8588-0ba47c52e58b', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:22 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:22 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:22 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:22 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:22 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:22 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=469318) WARNING 07-08 22:10:22 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:22 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:22 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=469317) WARNING 07-08 22:10:22 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=1 pid=469316) WARNING 07-08 22:10:22 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=469315) WARNING 07-08 22:10:22 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:22 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_3...
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:22 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_3...
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:22 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_3...
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:22 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_3...
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:41 [loader.py:458] Loading weights took 18.51 seconds
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:41 [loader.py:458] Loading weights took 18.59 seconds
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:41 [loader.py:458] Loading weights took 18.53 seconds
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:41 [loader.py:458] Loading weights took 18.53 seconds
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 18.743890 seconds
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 18.822089 seconds
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 18.814975 seconds
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 18.813055 seconds
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7161fb40be/rank_0_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:48 [backends.py:430] Dynamo bytecode transform time: 6.46 s
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7161fb40be/rank_3_0 for vLLM's torch.compile
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:48 [backends.py:430] Dynamo bytecode transform time: 6.49 s
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7161fb40be/rank_1_0 for vLLM's torch.compile
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:48 [backends.py:430] Dynamo bytecode transform time: 6.54 s
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7161fb40be/rank_2_0 for vLLM's torch.compile
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:48 [backends.py:430] Dynamo bytecode transform time: 6.57 s
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:10:51 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:10:51 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:10:51 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:10:51 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:11:13 [backends.py:148] Compiling a graph for general shape takes 24.65 s
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:11:13 [backends.py:148] Compiling a graph for general shape takes 24.86 s
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:11:13 [backends.py:148] Compiling a graph for general shape takes 25.04 s
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:11:14 [backends.py:148] Compiling a graph for general shape takes 25.18 s
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:11:27 [monitor.py:33] torch.compile takes 31.19 s in total
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:11:27 [monitor.py:33] torch.compile takes 31.43 s in total
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:11:27 [monitor.py:33] torch.compile takes 31.52 s in total
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:11:27 [monitor.py:33] torch.compile takes 31.64 s in total
-INFO 07-08 22:11:28 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
-INFO 07-08 22:11:28 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
-INFO 07-08 22:11:28 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-08 22:11:28 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-08 22:11:28 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-08 22:11:28 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-08 22:11:28 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
-INFO 07-08 22:11:28 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
-(VllmWorker rank=2 pid=469317) INFO 07-08 22:11:58 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
-(VllmWorker rank=1 pid=469316) INFO 07-08 22:11:58 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
-(VllmWorker rank=3 pid=469318) INFO 07-08 22:11:58 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
-(VllmWorker rank=0 pid=469315) INFO 07-08 22:11:58 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
-INFO 07-08 22:11:58 [core.py:159] init engine (profile, create kv cache, warmup model) took 76.56 seconds
-INFO 07-08 22:11:58 [core_client.py:439] Core engine process 0 ready.
-INFO 07-08 22:23:01 [importing.py:53] Triton module has been replaced with a placeholder.
-INFO 07-08 22:23:01 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 16:21:27 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 16:21:35 [config.py:717] This model supports multiple tasks: {'generate', 'classify', 'embed', 'reward', 'score'}. Defaulting to 'generate'.
+INFO 07-09 16:21:36 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 16:21:36 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 16:21:37 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_dare_linear_3', speculative_config=None, tokenizer='./merged_llama/llama_dare_linear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_dare_linear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 16:21:37 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 16:21:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_9e12ddcf'), local_subscribe_addr='ipc:///tmp/14ac47e5-841a-40d3-aad5-360539e70a2b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:21:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c94beefb'), local_subscribe_addr='ipc:///tmp/6a4559cf-68ab-4cb2-97d2-e07c955e3d2b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:21:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_88eb10f0'), local_subscribe_addr='ipc:///tmp/1511a552-aaae-412e-888d-bcbbd9859f8f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:21:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 16:21:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cfb6d2f9'), local_subscribe_addr='ipc:///tmp/f951d7a7-1f31-4ed2-ab35-d8ce683836ad', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6f3af099'), local_subscribe_addr='ipc:///tmp/9820ab11-58b1-4368-af8c-77ff62af3fcc', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:39 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:39 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:39 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:39 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:39 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:39 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:39 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:39 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=838278) WARNING 07-09 16:21:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=838277) WARNING 07-09 16:21:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=838275) WARNING 07-09 16:21:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=838276) WARNING 07-09 16:21:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_baf0c824'), local_subscribe_addr='ipc:///tmp/25149bca-d57e-482d-9376-c96e83dee27b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:39 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:39 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:39 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:39 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:39 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:39 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=838278) WARNING 07-09 16:21:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=838277) WARNING 07-09 16:21:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:39 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:39 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=838275) WARNING 07-09 16:21:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=838276) WARNING 07-09 16:21:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:39 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_3...
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:39 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_3...
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:39 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_3...
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:39 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_3...
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:53 [loader.py:458] Loading weights took 13.78 seconds
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:53 [loader.py:458] Loading weights took 13.90 seconds
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:53 [loader.py:458] Loading weights took 13.90 seconds
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:53 [loader.py:458] Loading weights took 13.86 seconds
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:21:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.060790 seconds
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:21:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.126021 seconds
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:21:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.127061 seconds
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:21:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.127224 seconds
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:22:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f25c89fed2/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:22:00 [backends.py:430] Dynamo bytecode transform time: 6.44 s
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:22:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f25c89fed2/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:22:00 [backends.py:430] Dynamo bytecode transform time: 6.50 s
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:22:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f25c89fed2/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:22:00 [backends.py:430] Dynamo bytecode transform time: 6.51 s
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:22:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f25c89fed2/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:22:00 [backends.py:430] Dynamo bytecode transform time: 6.57 s
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:22:04 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:22:04 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:22:04 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:22:04 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:22:26 [backends.py:148] Compiling a graph for general shape takes 24.85 s
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:22:26 [backends.py:148] Compiling a graph for general shape takes 24.94 s
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:22:26 [backends.py:148] Compiling a graph for general shape takes 25.06 s
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:22:26 [backends.py:148] Compiling a graph for general shape takes 25.11 s
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:22:39 [monitor.py:33] torch.compile takes 31.56 s in total
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:22:39 [monitor.py:33] torch.compile takes 31.29 s in total
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:22:39 [monitor.py:33] torch.compile takes 31.45 s in total
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:22:39 [monitor.py:33] torch.compile takes 31.68 s in total
+INFO 07-09 16:22:41 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
+INFO 07-09 16:22:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 16:22:41 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 16:22:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 16:22:41 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 16:22:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 16:22:41 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 16:22:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=0 pid=838275) INFO 07-09 16:23:16 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 2.44 GiB
+(VllmWorker rank=2 pid=838277) INFO 07-09 16:23:16 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 2.44 GiB
+(VllmWorker rank=3 pid=838278) INFO 07-09 16:23:16 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 2.44 GiB
+(VllmWorker rank=1 pid=838276) INFO 07-09 16:23:17 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 2.44 GiB
+INFO 07-09 16:23:17 [core.py:159] init engine (profile, create kv cache, warmup model) took 82.84 seconds
+INFO 07-09 16:23:17 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 16:34:24 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:34:24 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value| |Stderr|
|------------------|------:|---------------------|----:|---|-----:|
|all | |math_pass@1:1_samples| 0|± | 0|
diff --git a/merge_llama/logs/llama_dare_linear_5.log b/merge_llama/logs/llama_dare_linear_5.log
index 7f7f9925f5f0fcff599cfd9fc2fbf1b2f9d9832d..7c16207b60b9bf118937ca9fd5fb164df77e5c16 100644
--- a/merge_llama/logs/llama_dare_linear_5.log
+++ b/merge_llama/logs/llama_dare_linear_5.log
@@ -1,100 +1,88 @@
-INFO 07-08 22:23:00 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-08 22:23:09 [config.py:717] This model supports multiple tasks: {'classify', 'score', 'generate', 'reward', 'embed'}. Defaulting to 'generate'.
-INFO 07-08 22:23:09 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-08 22:23:09 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-08 22:23:11 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_dare_linear_5', speculative_config=None, tokenizer='./merged2/llama_dare_linear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_dare_linear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-08 22:23:11 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-08 22:23:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e50cf4a7'), local_subscribe_addr='ipc:///tmp/d46eed58-9637-42f3-a623-ff2e80a2a95a', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-08 22:23:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-WARNING 07-08 22:23:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-WARNING 07-08 22:23:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5006b0bf'), local_subscribe_addr='ipc:///tmp/af3a3e8f-8905-4c77-b9a0-acd320d8995b', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c0cb8e5d'), local_subscribe_addr='ipc:///tmp/f9fe5cab-8925-4085-a1b5-e0ffa80568e8', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-08 22:23:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7c261c90'), local_subscribe_addr='ipc:///tmp/61907bbf-e308-41fd-847f-343f4dd24474', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_015a357e'), local_subscribe_addr='ipc:///tmp/80ded898-8c80-4451-b2c7-e1408a8739a9', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:18 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:18 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:18 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:18 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:18 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:18 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:18 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:18 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=476078) WARNING 07-08 22:23:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=2 pid=476077) WARNING 07-08 22:23:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=476075) WARNING 07-08 22:23:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=1 pid=476076) WARNING 07-08 22:23:19 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_8e902508'), local_subscribe_addr='ipc:///tmp/d8f42dfb-3e52-4cd7-b7ba-671fc5da2e8e', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:19 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:19 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:19 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:19 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=476077) WARNING 07-08 22:23:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=3 pid=476078) WARNING 07-08 22:23:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:19 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=1 pid=476076) WARNING 07-08 22:23:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=476075) WARNING 07-08 22:23:19 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:19 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_5...
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:19 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_5...
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:19 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_5...
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:19 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_5...
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:36 [loader.py:458] Loading weights took 17.29 seconds
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:36 [loader.py:458] Loading weights took 17.40 seconds
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:36 [loader.py:458] Loading weights took 17.40 seconds
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:37 [loader.py:458] Loading weights took 17.37 seconds
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:37 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.619191 seconds
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:37 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.616676 seconds
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:37 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.631825 seconds
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:37 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.556969 seconds
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f39d1a17b9/rank_3_0 for vLLM's torch.compile
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:43 [backends.py:430] Dynamo bytecode transform time: 6.39 s
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f39d1a17b9/rank_2_0 for vLLM's torch.compile
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:43 [backends.py:430] Dynamo bytecode transform time: 6.45 s
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f39d1a17b9/rank_1_0 for vLLM's torch.compile
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:43 [backends.py:430] Dynamo bytecode transform time: 6.50 s
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f39d1a17b9/rank_0_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:43 [backends.py:430] Dynamo bytecode transform time: 6.55 s
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:23:47 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:23:47 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:23:47 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:23:47 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:24:08 [backends.py:148] Compiling a graph for general shape takes 24.31 s
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:24:08 [backends.py:148] Compiling a graph for general shape takes 24.38 s
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:24:09 [backends.py:148] Compiling a graph for general shape takes 25.14 s
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:24:10 [backends.py:148] Compiling a graph for general shape takes 25.49 s
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:24:23 [monitor.py:33] torch.compile takes 31.69 s in total
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:24:23 [monitor.py:33] torch.compile takes 31.99 s in total
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:24:23 [monitor.py:33] torch.compile takes 30.70 s in total
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:24:23 [monitor.py:33] torch.compile takes 30.83 s in total
-INFO 07-08 22:24:25 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
-INFO 07-08 22:24:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
-INFO 07-08 22:24:25 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-08 22:24:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-08 22:24:25 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-08 22:24:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-08 22:24:25 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
-INFO 07-08 22:24:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
-(VllmWorker rank=3 pid=476078) INFO 07-08 22:25:05 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-(VllmWorker rank=2 pid=476077) INFO 07-08 22:25:05 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-(VllmWorker rank=1 pid=476076) INFO 07-08 22:25:05 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-(VllmWorker rank=0 pid=476075) INFO 07-08 22:25:05 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-INFO 07-08 22:25:05 [core.py:159] init engine (profile, create kv cache, warmup model) took 88.22 seconds
-INFO 07-08 22:25:05 [core_client.py:439] Core engine process 0 ready.
-INFO 07-08 22:36:30 [importing.py:53] Triton module has been replaced with a placeholder.
-INFO 07-08 22:36:30 [__init__.py:239] Automatically detected platform cuda.
-| Task |Version| Metric |Value| |Stderr|
-|------------------|------:|---------------------|----:|---|-----:|
-|all | |math_pass@1:1_samples| 0|± | 0|
-| | |sem | 0|± | 0|
-|mm\|arc_challenge\|0| 0|sem | 0|± | 0|
-|mm\|arc_easy\|0 | 0|sem | 0|± | 0|
-|mm\|commonsenseqa\|0| 0|sem | 0|± | 0|
-|mm\|gpqa_diamond\|0 | 2|sem | 0| | |
-|mm\|gsm8k\|0 | 0|math_pass@1:1_samples| 0|± | 0|
-|mm\|math_500\|0 | 3|math_pass@1:1_samples| 0|± | 0|
-|mm\|truthfulqa\|0 | 0|sem | 0|± | 0|
-
+INFO 07-09 16:34:23 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 16:34:32 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'classify', 'embed', 'generate'}. Defaulting to 'generate'.
+INFO 07-09 16:34:32 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 16:34:32 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 16:34:34 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_dare_linear_5', speculative_config=None, tokenizer='./merged_llama/llama_dare_linear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_dare_linear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 16:34:34 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 16:34:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_3dbaa43f'), local_subscribe_addr='ipc:///tmp/0a34ebb6-7014-422b-ba2d-b66b7fa44bc2', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:34:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5c057a8a'), local_subscribe_addr='ipc:///tmp/b2a80ceb-172a-4134-8cae-a66d017d3857', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:34:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e00218f0'), local_subscribe_addr='ipc:///tmp/d679392f-914d-47e8-9a15-38de271f91c8', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 16:34:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 16:34:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_345d0557'), local_subscribe_addr='ipc:///tmp/1ac87574-00af-4a33-95b3-eaaefb5a7094', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7703380a'), local_subscribe_addr='ipc:///tmp/e4bf7ccb-e0c3-4d75-bb63-b7dbeb9e2787', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=877754) WARNING 07-09 16:34:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=877753) WARNING 07-09 16:34:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=877751) WARNING 07-09 16:34:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=877752) WARNING 07-09 16:34:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_3390f3bb'), local_subscribe_addr='ipc:///tmp/029e6002-6dcf-4f72-8e3c-1d6714280ebb', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:36 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:36 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:36 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:36 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=877753) WARNING 07-09 16:34:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=877754) WARNING 07-09 16:34:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=877751) WARNING 07-09 16:34:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=877752) WARNING 07-09 16:34:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:36 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_5...
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:36 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_5...
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:36 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_5...
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:36 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_5...
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:49 [loader.py:458] Loading weights took 12.93 seconds
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:49 [loader.py:458] Loading weights took 13.08 seconds
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:49 [loader.py:458] Loading weights took 13.08 seconds
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:49 [loader.py:458] Loading weights took 13.07 seconds
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:49 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.307056 seconds
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:49 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.307105 seconds
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:49 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.305791 seconds
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:49 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.195603 seconds
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/38e4d9d20f/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/38e4d9d20f/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/38e4d9d20f/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:56 [backends.py:430] Dynamo bytecode transform time: 6.64 s
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:56 [backends.py:430] Dynamo bytecode transform time: 6.64 s
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:56 [backends.py:430] Dynamo bytecode transform time: 6.64 s
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/38e4d9d20f/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:56 [backends.py:430] Dynamo bytecode transform time: 6.65 s
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:34:59 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:34:59 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:34:59 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:34:59 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:35:21 [backends.py:148] Compiling a graph for general shape takes 24.25 s
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:35:21 [backends.py:148] Compiling a graph for general shape takes 24.26 s
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:35:21 [backends.py:148] Compiling a graph for general shape takes 24.47 s
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:35:21 [backends.py:148] Compiling a graph for general shape takes 24.46 s
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:35:34 [monitor.py:33] torch.compile takes 31.12 s in total
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:35:34 [monitor.py:33] torch.compile takes 31.11 s in total
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:35:34 [monitor.py:33] torch.compile takes 30.91 s in total
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:35:34 [monitor.py:33] torch.compile takes 30.90 s in total
+INFO 07-09 16:35:36 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
+INFO 07-09 16:35:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 16:35:36 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 16:35:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 16:35:36 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 16:35:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 16:35:36 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 16:35:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=1 pid=877752) INFO 07-09 16:36:06 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
+(VllmWorker rank=3 pid=877754) INFO 07-09 16:36:06 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
+(VllmWorker rank=2 pid=877753) INFO 07-09 16:36:06 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
+(VllmWorker rank=0 pid=877751) INFO 07-09 16:36:06 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
+INFO 07-09 16:36:06 [core.py:159] init engine (profile, create kv cache, warmup model) took 76.65 seconds
+INFO 07-09 16:36:06 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 16:47:14 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:47:14 [__init__.py:239] Automatically detected platform cuda.
diff --git a/merge_llama/logs/llama_dare_linear_7.log b/merge_llama/logs/llama_dare_linear_7.log
index 0a820e36d14c9d4dafd806011317ab9817a293c1..4d110029e72a985ef0ab29e6eab0571cd8f62e19 100644
--- a/merge_llama/logs/llama_dare_linear_7.log
+++ b/merge_llama/logs/llama_dare_linear_7.log
@@ -1,91 +1,91 @@
-INFO 07-08 22:36:29 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-08 22:36:39 [config.py:717] This model supports multiple tasks: {'generate', 'score', 'embed', 'reward', 'classify'}. Defaulting to 'generate'.
-INFO 07-08 22:36:39 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-08 22:36:39 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-08 22:36:40 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_dare_linear_7', speculative_config=None, tokenizer='./merged2/llama_dare_linear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_dare_linear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-08 22:36:40 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-08 22:36:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_25695ef2'), local_subscribe_addr='ipc:///tmp/e21d0519-544d-4ccf-b895-9703412415c5', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 22:36:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_99f2f2b7'), local_subscribe_addr='ipc:///tmp/847f7cc7-4f2f-4b7a-80f3-b149b5c62813', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 22:36:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -WARNING 07-08 22:36:41 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0af9a286'), local_subscribe_addr='ipc:///tmp/0cd726b6-374d-49ec-89d9-d3c02a4e6949', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 22:36:41 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_87b5682e'), local_subscribe_addr='ipc:///tmp/f6619b64-b4b7-44c0-b963-6c664c27e353', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b3f062fa'), local_subscribe_addr='ipc:///tmp/41b644d0-a138-43a8-ac2f-eba8f3399b4f', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:48 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:48 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:48 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:48 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:49 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:49 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:49 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:49 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=0 pid=482152) WARNING 07-08 22:36:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=1 pid=482153) WARNING 07-08 22:36:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=2 pid=482154) WARNING 07-08 22:36:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=3 pid=482155) WARNING 07-08 22:36:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_4ed93110'), local_subscribe_addr='ipc:///tmp/dc24726f-99f5-4074-8be5-4f77646d9894', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:49 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:49 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:49 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:49 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:49 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:49 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=482152) WARNING 07-08 22:36:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:49 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=1 pid=482153) WARNING 07-08 22:36:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:49 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=482154) WARNING 07-08 22:36:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=3 pid=482155) WARNING 07-08 22:36:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:49 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_7... -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:49 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_7... -(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:49 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_7... -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:49 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_7... 
-(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:53 [loader.py:458] Loading weights took 3.24 seconds -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:53 [loader.py:458] Loading weights took 3.42 seconds -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:53 [loader.py:458] Loading weights took 3.41 seconds -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:53 [loader.py:458] Loading weights took 3.43 seconds -(VllmWorker rank=3 pid=482155) INFO 07-08 22:36:53 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 3.471464 seconds -(VllmWorker rank=0 pid=482152) INFO 07-08 22:36:53 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 3.660119 seconds -(VllmWorker rank=2 pid=482154) INFO 07-08 22:36:53 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 3.668374 seconds -(VllmWorker rank=1 pid=482153) INFO 07-08 22:36:53 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 3.667559 seconds -(VllmWorker rank=2 pid=482154) INFO 07-08 22:37:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/50735dc362/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=482154) INFO 07-08 22:37:00 [backends.py:430] Dynamo bytecode transform time: 6.44 s -(VllmWorker rank=1 pid=482153) INFO 07-08 22:37:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/50735dc362/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=482153) INFO 07-08 22:37:00 [backends.py:430] Dynamo bytecode transform time: 6.48 s -(VllmWorker rank=3 pid=482155) INFO 07-08 22:37:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/50735dc362/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=482155) INFO 07-08 22:37:00 [backends.py:430] Dynamo bytecode transform time: 6.51 s -(VllmWorker rank=0 pid=482152) INFO 07-08 22:37:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/50735dc362/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=482152) INFO 07-08 22:37:00 [backends.py:430] Dynamo bytecode transform time: 6.52 s -(VllmWorker rank=2 pid=482154) INFO 07-08 22:37:03 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=482152) INFO 07-08 22:37:03 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=482155) INFO 07-08 22:37:03 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=482153) INFO 07-08 22:37:03 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=482152) INFO 07-08 22:37:25 [backends.py:148] Compiling a graph for general shape takes 24.50 s -(VllmWorker rank=2 pid=482154) INFO 07-08 22:37:25 [backends.py:148] Compiling a graph for general shape takes 24.75 s -(VllmWorker rank=3 pid=482155) INFO 07-08 22:37:25 [backends.py:148] Compiling a graph for general shape takes 24.75 s -(VllmWorker rank=1 pid=482153) INFO 07-08 22:37:25 [backends.py:148] Compiling a graph for general shape takes 24.68 s -(VllmWorker rank=0 pid=482152) INFO 07-08 22:37:38 [monitor.py:33] torch.compile takes 31.01 s in total -(VllmWorker rank=2 pid=482154) INFO 07-08 22:37:38 [monitor.py:33] torch.compile takes 31.19 s in total -(VllmWorker rank=1 pid=482153) INFO 07-08 22:37:38 [monitor.py:33] torch.compile takes 31.17 s in total -(VllmWorker rank=3 pid=482155) INFO 07-08 22:37:38 [monitor.py:33] torch.compile takes 31.26 s in total -INFO 07-08 22:37:40 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 07-08 
22:37:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-08 22:37:40 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 22:37:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 22:37:40 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 22:37:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 22:37:40 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-08 22:37:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=1 pid=482153) INFO 07-08 22:38:10 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -(VllmWorker rank=2 pid=482154) INFO 07-08 22:38:10 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -(VllmWorker rank=3 pid=482155) INFO 07-08 22:38:10 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -(VllmWorker rank=0 pid=482152) INFO 07-08 22:38:10 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -INFO 07-08 22:38:10 [core.py:159] init engine (profile, create kv cache, warmup model) took 76.46 seconds -INFO 07-08 22:38:10 [core_client.py:439] Core engine process 0 ready. -INFO 07-08 22:49:16 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-08 22:49:16 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 16:47:13 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 16:47:22 [config.py:717] This model supports multiple tasks: {'classify', 'reward', 'generate', 'score', 'embed'}. Defaulting to 'generate'. +INFO 07-09 16:47:22 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 16:47:22 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 16:47:23 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_dare_linear_7', speculative_config=None, tokenizer='./merged_llama/llama_dare_linear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_dare_linear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 16:47:23 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 16:47:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_27e6be88'), local_subscribe_addr='ipc:///tmp/5bc4e407-c3cf-4276-ba74-1179ceb7b941', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 16:47:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5a5c3860'), local_subscribe_addr='ipc:///tmp/fa5da8d3-7620-4766-b56f-b962d9181a70', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 16:47:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 16:47:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ccb5d948'), local_subscribe_addr='ipc:///tmp/91e03ba6-54cf-4e32-a629-05f82382b5f9', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 16:47:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0467e04e'), local_subscribe_addr='ipc:///tmp/c110dcb2-2976-453d-9495-71da41c8fe22', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_192001e2'), local_subscribe_addr='ipc:///tmp/dbf48140-4e74-4f2c-b0b0-11332adf9c31', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=883375) WARNING 07-09 16:47:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=883376) WARNING 07-09 16:47:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=883374) WARNING 07-09 16:47:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=883373) WARNING 07-09 16:47:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_e164d57b'), local_subscribe_addr='ipc:///tmp/e7f96c17-0e30-4c00-a5ec-9fba18b485dc', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:25 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:25 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:25 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:25 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=883376) WARNING 07-09 16:47:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=883375) WARNING 07-09 16:47:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=883374) WARNING 07-09 16:47:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=883373) WARNING 07-09 16:47:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:25 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_7... +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:25 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_7... +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:25 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_7... +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:25 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_7... 
+(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:40 [loader.py:458] Loading weights took 13.89 seconds +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:40 [loader.py:458] Loading weights took 14.06 seconds +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:40 [loader.py:458] Loading weights took 14.06 seconds +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:40 [loader.py:458] Loading weights took 14.02 seconds +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:40 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.141504 seconds +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:40 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.284186 seconds +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:40 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.283557 seconds +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:40 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.282914 seconds +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/af873e674b/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:47 [backends.py:430] Dynamo bytecode transform time: 7.00 s +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/af873e674b/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:47 [backends.py:430] Dynamo bytecode transform time: 7.00 s +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/af873e674b/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/af873e674b/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:47 [backends.py:430] Dynamo bytecode transform time: 7.00 s +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:47 [backends.py:430] Dynamo bytecode transform time: 7.00 s +(VllmWorker rank=3 pid=883376) INFO 07-09 16:47:50 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=883373) INFO 07-09 16:47:50 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=883375) INFO 07-09 16:47:50 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=883374) INFO 07-09 16:47:50 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=883376) INFO 07-09 16:48:12 [backends.py:148] Compiling a graph for general shape takes 24.55 s +(VllmWorker rank=1 pid=883374) INFO 07-09 16:48:12 [backends.py:148] Compiling a graph for general shape takes 24.76 s +(VllmWorker rank=2 pid=883375) INFO 07-09 16:48:12 [backends.py:148] Compiling a graph for general shape takes 24.79 s +(VllmWorker rank=0 pid=883373) INFO 07-09 16:48:13 [backends.py:148] Compiling a graph for general shape takes 24.94 s +(VllmWorker rank=1 pid=883374) INFO 07-09 16:48:26 [monitor.py:33] torch.compile takes 31.76 s in total +(VllmWorker rank=0 pid=883373) INFO 07-09 16:48:26 [monitor.py:33] torch.compile takes 31.94 s in total +(VllmWorker rank=2 pid=883375) INFO 07-09 16:48:26 [monitor.py:33] torch.compile takes 31.79 s in total +(VllmWorker rank=3 pid=883376) INFO 07-09 16:48:26 [monitor.py:33] torch.compile takes 31.55 s in total +INFO 07-09 16:48:27 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 
07-09 16:48:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x +INFO 07-09 16:48:27 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 16:48:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 16:48:27 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 16:48:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 16:48:27 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens +INFO 07-09 16:48:27 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x +(VllmWorker rank=2 pid=883375) INFO 07-09 16:49:04 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 2.44 GiB +(VllmWorker rank=3 pid=883376) INFO 07-09 16:49:04 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 2.44 GiB +(VllmWorker rank=1 pid=883374) INFO 07-09 16:49:04 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 2.44 GiB +(VllmWorker rank=0 pid=883373) INFO 07-09 16:49:04 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 2.44 GiB +INFO 07-09 16:49:04 [core.py:159] init engine (profile, create kv cache, warmup model) took 84.21 seconds +INFO 07-09 16:49:04 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 17:00:14 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 17:00:14 [__init__.py:239] Automatically detected platform cuda. | Task |Version| Metric |Value| |Stderr| |------------------|------:|---------------------|----:|---|-----:| |all | |math_pass@1:1_samples| 0|± | 0| diff --git a/merge_llama/logs/llama_dare_linear_9.log b/merge_llama/logs/llama_dare_linear_9.log index 06ef5d7a51e25d35d014ed536edc547b81d344ce..018cc55f47a443f6fb70d7a5c26524e52674f119 100644 --- a/merge_llama/logs/llama_dare_linear_9.log +++ b/merge_llama/logs/llama_dare_linear_9.log @@ -1,91 +1,91 @@ -INFO 07-08 22:49:15 [__init__.py:239] Automatically detected platform cuda. -INFO 07-08 22:49:23 [config.py:717] This model supports multiple tasks: {'embed', 'generate', 'reward', 'score', 'classify'}. Defaulting to 'generate'. -INFO 07-08 22:49:24 [config.py:1770] Defaulting to use mp for distributed inference -INFO 07-08 22:49:24 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
-INFO 07-08 22:49:25 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_dare_linear_9', speculative_config=None, tokenizer='./merged2/llama_dare_linear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_dare_linear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} -WARNING 07-08 22:49:25 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
-INFO 07-08 22:49:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_45047748'), local_subscribe_addr='ipc:///tmp/acb35501-de94-4e6a-bf5a-facba48a843a', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 22:49:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_00b1d97e'), local_subscribe_addr='ipc:///tmp/17961952-3607-4af0-9578-764639f60284', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 22:49:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5a6e9ace'), local_subscribe_addr='ipc:///tmp/d4080b03-4e53-44df-a4bc-8ab739572723', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 22:49:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -WARNING 07-08 22:49:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_eba67524'), local_subscribe_addr='ipc:///tmp/b695e098-02a6-4729-aa3b-4d3fcba2f0ee', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_130fd0a5'), local_subscribe_addr='ipc:///tmp/821ccdc1-a3f0-48b2-9e01-bd5ffb8a5157', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:33 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:33 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:33 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:33 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:33 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:33 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:33 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:33 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=488003) WARNING 07-08 22:49:33 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=3 pid=488004) WARNING 07-08 22:49:33 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=0 pid=488001) WARNING 07-08 22:49:33 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=1 pid=488002) WARNING 07-08 22:49:33 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_8eaba29c'), local_subscribe_addr='ipc:///tmp/a6eadacd-e079-4cb3-b942-6a246a8aa25b', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:33 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:33 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:33 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:33 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:33 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:33 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=3 pid=488004) WARNING 07-08 22:49:33 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:33 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=488003) WARNING 07-08 22:49:33 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:33 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=488001) WARNING 07-08 22:49:33 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=488002) WARNING 07-08 22:49:33 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:33 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_9... -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:33 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_9... -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:33 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_9... -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:33 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_dare_linear_9... 
-(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:49 [loader.py:458] Loading weights took 15.64 seconds -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:49 [loader.py:458] Loading weights took 15.67 seconds -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:49 [loader.py:458] Loading weights took 15.65 seconds -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:49 [loader.py:458] Loading weights took 15.66 seconds -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:50 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.899282 seconds -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:50 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.858337 seconds -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:50 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.919920 seconds -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:50 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.933426 seconds -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9bbc9acd0/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:56 [backends.py:430] Dynamo bytecode transform time: 6.36 s -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9bbc9acd0/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:56 [backends.py:430] Dynamo bytecode transform time: 6.36 s -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9bbc9acd0/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:56 [backends.py:430] Dynamo bytecode transform time: 6.38 s -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9bbc9acd0/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:56 [backends.py:430] Dynamo bytecode transform time: 6.46 s -(VllmWorker rank=2 pid=488003) INFO 07-08 22:49:59 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=488004) INFO 07-08 22:49:59 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=488002) INFO 07-08 22:49:59 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=488001) INFO 07-08 22:49:59 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=488002) INFO 07-08 22:50:21 [backends.py:148] Compiling a graph for general shape takes 24.07 s -(VllmWorker rank=2 pid=488003) INFO 07-08 22:50:21 [backends.py:148] Compiling a graph for general shape takes 24.34 s -(VllmWorker rank=3 pid=488004) INFO 07-08 22:50:21 [backends.py:148] Compiling a graph for general shape takes 24.53 s -(VllmWorker rank=0 pid=488001) INFO 07-08 22:50:21 [backends.py:148] Compiling a graph for general shape takes 24.46 s -(VllmWorker rank=1 pid=488002) INFO 07-08 22:50:34 [monitor.py:33] torch.compile takes 30.44 s in total -(VllmWorker rank=0 pid=488001) INFO 07-08 22:50:34 [monitor.py:33] torch.compile takes 30.92 s in total -(VllmWorker rank=2 pid=488003) INFO 07-08 22:50:34 [monitor.py:33] torch.compile takes 30.71 s in total -(VllmWorker rank=3 pid=488004) INFO 07-08 22:50:34 [monitor.py:33] torch.compile takes 30.89 s in total -INFO 07-08 22:50:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 
07-08 22:50:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-08 22:50:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 22:50:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 22:50:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 22:50:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 22:50:35 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-08 22:50:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=0 pid=488001) INFO 07-08 22:51:03 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB -(VllmWorker rank=1 pid=488002) INFO 07-08 22:51:03 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB -(VllmWorker rank=3 pid=488004) INFO 07-08 22:51:03 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB -(VllmWorker rank=2 pid=488003) INFO 07-08 22:51:03 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB -INFO 07-08 22:51:03 [core.py:159] init engine (profile, create kv cache, warmup model) took 73.01 seconds -INFO 07-08 22:51:03 [core_client.py:439] Core engine process 0 ready. -INFO 07-08 23:02:10 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-08 23:02:10 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 17:00:13 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 17:00:22 [config.py:717] This model supports multiple tasks: {'classify', 'reward', 'generate', 'score', 'embed'}. Defaulting to 'generate'. +INFO 07-09 17:00:22 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 17:00:22 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 17:00:23 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_dare_linear_9', speculative_config=None, tokenizer='./merged_llama/llama_dare_linear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_dare_linear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 17:00:23 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 17:00:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_571f2b71'), local_subscribe_addr='ipc:///tmp/ba7ffce7-6306-4379-968f-c1ccb11e5609', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 17:00:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0152c8e2'), local_subscribe_addr='ipc:///tmp/a015ae61-28a6-4d47-8b3f-85eea1961c8c', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 17:00:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6e0efbdf'), local_subscribe_addr='ipc:///tmp/0031d92b-7d37-4471-88ed-4834248e30ca', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 17:00:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 17:00:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_85114774'), local_subscribe_addr='ipc:///tmp/e25592ad-93c0-49b1-8e81-bcd93d4350e2', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0a2671ed'), local_subscribe_addr='ipc:///tmp/02e5c2e1-2c44-4c09-b873-0d6159966b3c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:25 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:25 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=888234) WARNING 07-09 17:00:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=888235) WARNING 07-09 17:00:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=888233) WARNING 07-09 17:00:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=888232) WARNING 07-09 17:00:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_fa79a7b5'), local_subscribe_addr='ipc:///tmp/417f3bf1-34d0-4fd8-920d-6c47ff033bdf', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:26 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:26 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:26 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:26 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=888235) WARNING 07-09 17:00:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=888234) WARNING 07-09 17:00:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=888233) WARNING 07-09 17:00:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=888232) WARNING 07-09 17:00:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:26 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_9... +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:26 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_9... +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:26 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_9... +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:26 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_dare_linear_9... 
+(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:39 [loader.py:458] Loading weights took 13.25 seconds +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:39 [loader.py:458] Loading weights took 13.40 seconds +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:39 [loader.py:458] Loading weights took 13.40 seconds +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:39 [loader.py:458] Loading weights took 13.36 seconds +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:39 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.631388 seconds +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:40 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.630492 seconds +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:40 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.633745 seconds +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:40 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.507511 seconds +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:46 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/9d3bf739f8/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:46 [backends.py:430] Dynamo bytecode transform time: 6.42 s +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:46 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/9d3bf739f8/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:46 [backends.py:430] Dynamo bytecode transform time: 6.56 s +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:46 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/9d3bf739f8/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:46 [backends.py:430] Dynamo bytecode transform time: 6.66 s +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:46 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/9d3bf739f8/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:46 [backends.py:430] Dynamo bytecode transform time: 6.67 s +(VllmWorker rank=3 pid=888235) INFO 07-09 17:00:49 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=888232) INFO 07-09 17:00:49 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=888234) INFO 07-09 17:00:50 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=888233) INFO 07-09 17:00:50 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=888232) INFO 07-09 17:01:11 [backends.py:148] Compiling a graph for general shape takes 24.07 s +(VllmWorker rank=3 pid=888235) INFO 07-09 17:01:11 [backends.py:148] Compiling a graph for general shape takes 24.23 s +(VllmWorker rank=1 pid=888233) INFO 07-09 17:01:11 [backends.py:148] Compiling a graph for general shape takes 24.13 s +(VllmWorker rank=2 pid=888234) INFO 07-09 17:01:11 [backends.py:148] Compiling a graph for general shape takes 24.20 s +(VllmWorker rank=1 pid=888233) INFO 07-09 17:01:24 [monitor.py:33] torch.compile takes 30.80 s in total +(VllmWorker rank=0 pid=888232) INFO 07-09 17:01:24 [monitor.py:33] torch.compile takes 30.62 s in total +(VllmWorker rank=3 pid=888235) INFO 07-09 17:01:24 [monitor.py:33] torch.compile takes 30.65 s in total +(VllmWorker rank=2 pid=888234) INFO 07-09 17:01:24 [monitor.py:33] torch.compile takes 30.87 s in total +INFO 07-09 17:01:25 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 
07-09 17:01:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x +INFO 07-09 17:01:25 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 17:01:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 17:01:25 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 17:01:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 17:01:25 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens +INFO 07-09 17:01:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x +(VllmWorker rank=3 pid=888235) INFO 07-09 17:01:55 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB +(VllmWorker rank=1 pid=888233) INFO 07-09 17:01:55 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB +(VllmWorker rank=2 pid=888234) INFO 07-09 17:01:55 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB +(VllmWorker rank=0 pid=888232) INFO 07-09 17:01:55 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB +INFO 07-09 17:01:55 [core.py:159] init engine (profile, create kv cache, warmup model) took 75.83 seconds +INFO 07-09 17:01:56 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 17:13:01 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 17:13:01 [__init__.py:239] Automatically detected platform cuda. | Task |Version| Metric |Value| |Stderr| |------------------|------:|---------------------|----:|---|-----:| |all | |math_pass@1:1_samples| 0|± | 0| diff --git a/merge_llama/logs/llama_linear_1.log b/merge_llama/logs/llama_linear_1.log index 81f717a99250a61cc84e337a62813666ae0ba18e..47eef9dbc7ed23a00a499bd6eb0508e1f6e78b33 100644 --- a/merge_llama/logs/llama_linear_1.log +++ b/merge_llama/logs/llama_linear_1.log @@ -1,100 +1,100 @@ -INFO 07-08 23:02:09 [__init__.py:239] Automatically detected platform cuda. -INFO 07-08 23:02:17 [config.py:717] This model supports multiple tasks: {'classify', 'reward', 'embed', 'generate', 'score'}. Defaulting to 'generate'. -INFO 07-08 23:02:17 [config.py:1770] Defaulting to use mp for distributed inference -INFO 07-08 23:02:17 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
-INFO 07-08 23:02:19 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_linear_1', speculative_config=None, tokenizer='./merged2/llama_linear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_linear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} -WARNING 07-08 23:02:19 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
-INFO 07-08 23:02:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_c477e76a'), local_subscribe_addr='ipc:///tmp/b6088acf-b9f1-459f-a5fa-6c82442016e0', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:02:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5b90ff7e'), local_subscribe_addr='ipc:///tmp/e75144c9-285e-4b53-82dc-aa7679976178', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:02:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_73bdc171'), local_subscribe_addr='ipc:///tmp/56977811-4d9b-48ab-99f8-53cbe70acdcb', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:02:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -WARNING 07-08 23:02:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f67b17cd'), local_subscribe_addr='ipc:///tmp/fed143fc-b7d5-4aeb-a393-4537b179ffa6', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c0722aa4'), local_subscribe_addr='ipc:///tmp/544c2794-96a1-4657-bd3b-724724c86035', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:26 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:26 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:26 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:26 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:26 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:26 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:26 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:26 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=493893) WARNING 07-08 23:02:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=2 pid=493892) WARNING 07-08 23:02:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=0 pid=493890) WARNING 07-08 23:02:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=1 pid=493891) WARNING 07-08 23:02:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_f146de96'), local_subscribe_addr='ipc:///tmp/98063888-ebff-41e1-8c5e-50a1b3b28579', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:27 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:27 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:27 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:27 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:27 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:27 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:27 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=3 pid=493893) WARNING 07-08 23:02:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=493892) WARNING 07-08 23:02:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:27 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=493890) WARNING 07-08 23:02:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=493891) WARNING 07-08 23:02:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_1... -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_1... -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_1... -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:27 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_1... 
-(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:41 [loader.py:458] Loading weights took 14.00 seconds -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:41 [loader.py:458] Loading weights took 14.12 seconds -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:41 [loader.py:458] Loading weights took 14.13 seconds -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:41 [loader.py:458] Loading weights took 14.09 seconds -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.270209 seconds -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.350622 seconds -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.351174 seconds -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:41 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.352205 seconds -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9c324cef5/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:48 [backends.py:430] Dynamo bytecode transform time: 6.47 s -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9c324cef5/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:48 [backends.py:430] Dynamo bytecode transform time: 6.48 s -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9c324cef5/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:48 [backends.py:430] Dynamo bytecode transform time: 6.50 s -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f9c324cef5/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:48 [backends.py:430] Dynamo bytecode transform time: 6.66 s -(VllmWorker rank=3 pid=493893) INFO 07-08 23:02:51 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=493891) INFO 07-08 23:02:51 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=2 pid=493892) INFO 07-08 23:02:51 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=493890) INFO 07-08 23:02:51 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=493893) INFO 07-08 23:03:13 [backends.py:148] Compiling a graph for general shape takes 24.39 s -(VllmWorker rank=2 pid=493892) INFO 07-08 23:03:13 [backends.py:148] Compiling a graph for general shape takes 24.36 s -(VllmWorker rank=1 pid=493891) INFO 07-08 23:03:13 [backends.py:148] Compiling a graph for general shape takes 24.55 s -(VllmWorker rank=0 pid=493890) INFO 07-08 23:03:14 [backends.py:148] Compiling a graph for general shape takes 24.99 s -(VllmWorker rank=2 pid=493892) INFO 07-08 23:03:27 [monitor.py:33] torch.compile takes 30.86 s in total -(VllmWorker rank=1 pid=493891) INFO 07-08 23:03:27 [monitor.py:33] torch.compile takes 31.03 s in total -(VllmWorker rank=3 pid=493893) INFO 07-08 23:03:27 [monitor.py:33] torch.compile takes 30.86 s in total -(VllmWorker rank=0 pid=493890) INFO 07-08 23:03:27 [monitor.py:33] torch.compile takes 31.66 s in total -INFO 07-08 23:03:29 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 
07-08 23:03:29 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-08 23:03:29 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:03:29 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:03:29 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:03:29 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:03:29 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-08 23:03:29 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=2 pid=493892) INFO 07-08 23:04:02 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB -(VllmWorker rank=3 pid=493893) INFO 07-08 23:04:02 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB -(VllmWorker rank=1 pid=493891) INFO 07-08 23:04:02 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB -(VllmWorker rank=0 pid=493890) INFO 07-08 23:04:02 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB -INFO 07-08 23:04:02 [core.py:159] init engine (profile, create kv cache, warmup model) took 80.54 seconds -INFO 07-08 23:04:02 [core_client.py:439] Core engine process 0 ready. -INFO 07-08 23:15:23 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-08 23:15:23 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:51:24 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:51:33 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'generate', 'embed', 'classify'}. Defaulting to 'generate'. +INFO 07-09 20:51:33 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:51:33 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 20:51:35 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_linear_1', speculative_config=None, tokenizer='./merged_llama/llama_linear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_linear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:51:35 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
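[The config line above pins down how each merged checkpoint is served: float16, max_seq_len=2048, tensor_parallel_size=4, prefix caching enabled. A minimal sketch of an equivalent offline vLLM launch, for reference only; the logs do not show the actual harness invocation:]

    from vllm import LLM

    # Mirrors the engine config logged above for the merged checkpoint.
    llm = LLM(
        model="./merged_llama/llama_linear_1",
        tensor_parallel_size=4,
        dtype="float16",
        max_model_len=2048,
        enable_prefix_caching=True,
    )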
+INFO 07-09 20:51:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_b7731af7'), local_subscribe_addr='ipc:///tmp/d4cb2991-ef4b-4175-8e65-b6e5f346b5e6', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:51:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:51:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2f384b1f'), local_subscribe_addr='ipc:///tmp/ed8b8c1e-1b90-427a-9948-bd0c100416d1', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:51:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:51:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b80d1839'), local_subscribe_addr='ipc:///tmp/f11b03e2-cb17-4e77-94a3-2c327eed552f', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1f6f57fa'), local_subscribe_addr='ipc:///tmp/1fcf59ad-19cc-4c93-90ae-9d1951ad6bd0', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_aaff9982'), local_subscribe_addr='ipc:///tmp/b4ce1247-7b8d-4443-b200-337287b0b1ce', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:37 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:37 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:37 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:37 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=997817) WARNING 07-09 20:51:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=997819) WARNING 07-09 20:51:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=997818) WARNING 07-09 20:51:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=997820) WARNING 07-09 20:51:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_502d338f'), local_subscribe_addr='ipc:///tmp/9eae70aa-404f-42c3-94f1-a61b4b859243', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:37 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:37 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:37 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:37 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=997819) WARNING 07-09 20:51:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=997820) WARNING 07-09 20:51:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=997817) WARNING 07-09 20:51:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=997818) WARNING 07-09 20:51:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_1... +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_1... +(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_1... +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_1... 
+(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:54 [loader.py:458] Loading weights took 16.35 seconds +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:54 [loader.py:458] Loading weights took 16.44 seconds +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:54 [loader.py:458] Loading weights took 16.44 seconds +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:54 [loader.py:458] Loading weights took 16.44 seconds +(VllmWorker rank=3 pid=997820) INFO 07-09 20:51:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.669321 seconds +(VllmWorker rank=2 pid=997819) INFO 07-09 20:51:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.768064 seconds +(VllmWorker rank=0 pid=997817) INFO 07-09 20:51:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.762350 seconds +(VllmWorker rank=1 pid=997818) INFO 07-09 20:51:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.767481 seconds +(VllmWorker rank=1 pid=997818) INFO 07-09 20:52:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a963c624bd/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=997820) INFO 07-09 20:52:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a963c624bd/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=997817) INFO 07-09 20:52:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a963c624bd/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=997818) INFO 07-09 20:52:01 [backends.py:430] Dynamo bytecode transform time: 6.52 s +(VllmWorker rank=2 pid=997819) INFO 07-09 20:52:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a963c624bd/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=997820) INFO 07-09 20:52:01 [backends.py:430] Dynamo bytecode transform time: 6.52 s +(VllmWorker rank=0 pid=997817) INFO 07-09 20:52:01 [backends.py:430] Dynamo bytecode transform time: 6.52 s +(VllmWorker rank=2 pid=997819) INFO 07-09 20:52:01 [backends.py:430] Dynamo bytecode transform time: 6.52 s +(VllmWorker rank=1 pid=997818) INFO 07-09 20:52:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=997817) INFO 07-09 20:52:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=997820) INFO 07-09 20:52:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=997819) INFO 07-09 20:52:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=997817) INFO 07-09 20:52:26 [backends.py:148] Compiling a graph for general shape takes 24.08 s +(VllmWorker rank=3 pid=997820) INFO 07-09 20:52:26 [backends.py:148] Compiling a graph for general shape takes 24.13 s +(VllmWorker rank=1 pid=997818) INFO 07-09 20:52:26 [backends.py:148] Compiling a graph for general shape takes 24.26 s +(VllmWorker rank=2 pid=997819) INFO 07-09 20:52:26 [backends.py:148] Compiling a graph for general shape takes 24.40 s +(VllmWorker rank=3 pid=997820) INFO 07-09 20:52:39 [monitor.py:33] torch.compile takes 30.65 s in total +(VllmWorker rank=2 pid=997819) INFO 07-09 20:52:39 [monitor.py:33] torch.compile takes 30.92 s in total +(VllmWorker rank=0 pid=997817) INFO 07-09 20:52:39 [monitor.py:33] torch.compile takes 30.60 s in total +(VllmWorker rank=1 pid=997818) INFO 07-09 20:52:39 [monitor.py:33] torch.compile takes 30.77 s in total +INFO 07-09 20:52:40 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 
07-09 20:52:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x +INFO 07-09 20:52:40 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 20:52:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 20:52:40 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 20:52:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 20:52:40 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens +INFO 07-09 20:52:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x +(VllmWorker rank=3 pid=997820) INFO 07-09 20:53:07 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +(VllmWorker rank=1 pid=997818) INFO 07-09 20:53:07 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +(VllmWorker rank=0 pid=997817) INFO 07-09 20:53:07 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +(VllmWorker rank=2 pid=997819) INFO 07-09 20:53:08 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +INFO 07-09 20:53:08 [core.py:159] init engine (profile, create kv cache, warmup model) took 73.33 seconds +INFO 07-09 20:53:08 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 20:54:03 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 20:54:03 [__init__.py:239] Automatically detected platform cuda. | Task |Version| Metric |Value | |Stderr| |------------------|------:|---------------------|-----:|---|-----:| -|all | |math_pass@1:1_samples|0.8711|± |0.0317| -| | |sem |0.7084|± |0.0213| -|mm\|arc_challenge\|0| 0|sem |0.9502|± |0.0122| -|mm\|arc_easy\|0 | 0|sem |0.9737|± |0.0055| -|mm\|commonsenseqa\|0| 0|sem |0.8799|± |0.0194| +|all | |math_pass@1:1_samples|0.1038|± |0.0188| +| | |sem |0.1949|± |0.0198| +|mm\|arc_challenge\|0| 0|sem |0.3458|± |0.0266| +|mm\|arc_easy\|0 | 0|sem |0.4192|± |0.0171| +|mm\|commonsenseqa\|0| 0|sem |0.1979|± |0.0237| |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | -|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8699|± |0.0141| -|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8723|± |0.0492| -|mm\|truthfulqa\|0 | 0|sem |0.7381|± |0.0483| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.1863|± |0.0163| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.0213|± |0.0213| +|mm\|truthfulqa\|0 | 0|sem |0.0119|± |0.0119| diff --git a/merge_llama/logs/llama_linear_3.log b/merge_llama/logs/llama_linear_3.log index 85141f30133b342b227b3f978b09427bb29a67c7..bc0ade036c58871185ec56dbea1c2421e866d1c3 100644 --- a/merge_llama/logs/llama_linear_3.log +++ b/merge_llama/logs/llama_linear_3.log @@ -1,100 +1,96 @@ -INFO 07-08 23:15:22 [__init__.py:239] Automatically detected platform cuda. -INFO 07-08 23:15:31 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'classify', 'reward', 'generate'}. Defaulting to 'generate'. -INFO 07-08 23:15:31 [config.py:1770] Defaulting to use mp for distributed inference -INFO 07-08 23:15:31 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
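[The llama_linear_1 tables above suggest the "all" rows are unweighted means of the per-task scores; checking against the new (+) values:]

    # Per-task scores from the updated llama_linear_1 table
    sem = [0.3458, 0.4192, 0.1979, 0.0000, 0.0119]  # arc_challenge, arc_easy, commonsenseqa, gpqa_diamond, truthfulqa
    math = [0.1863, 0.0213]                         # gsm8k, math_500
    print(sum(sem) / len(sem))    # 0.19496 ~ the 0.1949 "all" sem row
    print(sum(math) / len(math))  # 0.1038  = the "all" math_pass@1 row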
-INFO 07-08 23:15:33 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_linear_3', speculative_config=None, tokenizer='./merged2/llama_linear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_linear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} -WARNING 07-08 23:15:33 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
-INFO 07-08 23:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_70d95901'), local_subscribe_addr='ipc:///tmp/0cdaf875-d325-4164-871a-00ecfc6f3fef', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_87f720e2'), local_subscribe_addr='ipc:///tmp/dd8eda34-579b-464b-a9ea-c84d1a71f7de', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1963e98e'), local_subscribe_addr='ipc:///tmp/b60995d3-1db8-4b3a-a3a0-f94c74816724', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_90b13b18'), local_subscribe_addr='ipc:///tmp/ce24b2ec-b3c5-4e04-a337-6378042c9220', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_99defe59'), local_subscribe_addr='ipc:///tmp/47ddc143-2247-42f5-9fac-b1cd77fdec45', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:40 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:40 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:40 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:40 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:40 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:40 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:40 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:40 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=499920) WARNING 07-08 23:15:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=3 pid=499921) WARNING 07-08 23:15:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=0 pid=499918) WARNING 07-08 23:15:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=1 pid=499919) WARNING 07-08 23:15:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_0f5e406c'), local_subscribe_addr='ipc:///tmp/d67cf6d4-f401-4648-96ab-1bbebe197778', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:40 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:40 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:40 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:40 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:40 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:40 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=3 pid=499921) WARNING 07-08 23:15:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=499920) WARNING 07-08 23:15:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:40 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:40 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=499918) WARNING 07-08 23:15:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=499919) WARNING 07-08 23:15:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:40 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_3... -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:40 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_3... -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:40 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_3... -(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:40 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_3... 
-(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:55 [loader.py:458] Loading weights took 14.37 seconds -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:55 [loader.py:458] Loading weights took 14.44 seconds -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:55 [loader.py:458] Loading weights took 14.48 seconds -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:55 [loader.py:458] Loading weights took 14.44 seconds -(VllmWorker rank=2 pid=499920) INFO 07-08 23:15:55 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.592294 seconds -(VllmWorker rank=3 pid=499921) INFO 07-08 23:15:55 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.703862 seconds -(VllmWorker rank=1 pid=499919) INFO 07-08 23:15:55 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.710342 seconds -(VllmWorker rank=0 pid=499918) INFO 07-08 23:15:56 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.704882 seconds -(VllmWorker rank=2 pid=499920) INFO 07-08 23:16:02 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f370d68ce4/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=499920) INFO 07-08 23:16:02 [backends.py:430] Dynamo bytecode transform time: 6.40 s -(VllmWorker rank=3 pid=499921) INFO 07-08 23:16:02 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f370d68ce4/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=499921) INFO 07-08 23:16:02 [backends.py:430] Dynamo bytecode transform time: 6.41 s -(VllmWorker rank=0 pid=499918) INFO 07-08 23:16:02 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f370d68ce4/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=499918) INFO 07-08 23:16:02 [backends.py:430] Dynamo bytecode transform time: 6.47 s -(VllmWorker rank=1 pid=499919) INFO 07-08 23:16:02 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f370d68ce4/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=499919) INFO 07-08 23:16:02 [backends.py:430] Dynamo bytecode transform time: 6.51 s -(VllmWorker rank=3 pid=499921) INFO 07-08 23:16:05 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=2 pid=499920) INFO 07-08 23:16:05 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=499918) INFO 07-08 23:16:05 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=499919) INFO 07-08 23:16:05 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=499921) INFO 07-08 23:16:27 [backends.py:148] Compiling a graph for general shape takes 24.29 s -(VllmWorker rank=2 pid=499920) INFO 07-08 23:16:27 [backends.py:148] Compiling a graph for general shape takes 24.63 s -(VllmWorker rank=0 pid=499918) INFO 07-08 23:16:28 [backends.py:148] Compiling a graph for general shape takes 24.83 s -(VllmWorker rank=1 pid=499919) INFO 07-08 23:16:28 [backends.py:148] Compiling a graph for general shape takes 25.07 s -(VllmWorker rank=0 pid=499918) INFO 07-08 23:16:41 [monitor.py:33] torch.compile takes 31.30 s in total -(VllmWorker rank=3 pid=499921) INFO 07-08 23:16:41 [monitor.py:33] torch.compile takes 30.71 s in total -(VllmWorker rank=2 pid=499920) INFO 07-08 23:16:41 [monitor.py:33] torch.compile takes 31.04 s in total -(VllmWorker rank=1 pid=499919) INFO 07-08 23:16:41 [monitor.py:33] torch.compile takes 31.58 s in total -INFO 07-08 23:16:43 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 
07-08 23:16:43 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-08 23:16:43 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:16:43 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:16:43 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:16:43 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:16:43 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-08 23:16:43 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=2 pid=499920) INFO 07-08 23:17:18 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 2.44 GiB -(VllmWorker rank=3 pid=499921) INFO 07-08 23:17:18 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 2.44 GiB -(VllmWorker rank=1 pid=499919) INFO 07-08 23:17:18 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 2.44 GiB -(VllmWorker rank=0 pid=499918) INFO 07-08 23:17:18 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 2.44 GiB -INFO 07-08 23:17:18 [core.py:159] init engine (profile, create kv cache, warmup model) took 82.19 seconds -INFO 07-08 23:17:18 [core_client.py:439] Core engine process 0 ready. -INFO 07-08 23:28:45 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-08 23:28:45 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:54:02 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:54:11 [config.py:717] This model supports multiple tasks: {'generate', 'reward', 'classify', 'embed', 'score'}. Defaulting to 'generate'. +INFO 07-09 20:54:11 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:54:11 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 20:54:12 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_linear_3', speculative_config=None, tokenizer='./merged_llama/llama_linear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_linear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:54:12 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 20:54:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_f2fa3e1a'), local_subscribe_addr='ipc:///tmp/e02e6fdc-8454-4ea0-8b7c-f81e10d75173', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:54:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0a531920'), local_subscribe_addr='ipc:///tmp/5da86095-c6a9-4d2d-a4f3-5d08bce691cf', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:54:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e8690cdd'), local_subscribe_addr='ipc:///tmp/39a108b1-c56a-4e24-80a4-659a876b80e2', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:54:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:54:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_25fe4d2f'), local_subscribe_addr='ipc:///tmp/edf2909c-a110-4db4-9719-5a22e4a25190', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:13 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_df77009d'), local_subscribe_addr='ipc:///tmp/deb636e3-fb0a-4b81-9565-507eab83d9d9', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:14 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:14 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:14 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:14 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:14 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:14 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:14 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:14 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=1000249) WARNING 07-09 20:54:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=1000248) WARNING 07-09 20:54:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=1000246) WARNING 07-09 20:54:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=1000247) WARNING 07-09 20:54:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_8e9c32f9'), local_subscribe_addr='ipc:///tmp/f34a6f3f-1e1c-4250-b02b-16fa115d9cf1', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:14 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:14 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=1000246) WARNING 07-09 20:54:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:14 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=1000247) WARNING 07-09 20:54:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:14 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=1000248) WARNING 07-09 20:54:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=1000249) WARNING 07-09 20:54:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:14 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_3... +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:14 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_3... +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:14 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_3... +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:14 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_3... 
+(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:30 [loader.py:458] Loading weights took 15.31 seconds +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:30 [loader.py:458] Loading weights took 15.31 seconds +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:30 [loader.py:458] Loading weights took 15.34 seconds +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:30 [loader.py:458] Loading weights took 15.32 seconds +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:30 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.526348 seconds +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:30 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.563975 seconds +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:30 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.575091 seconds +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:30 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.571665 seconds +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e396222f44/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:37 [backends.py:430] Dynamo bytecode transform time: 6.36 s +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e396222f44/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:37 [backends.py:430] Dynamo bytecode transform time: 6.40 s +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e396222f44/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:37 [backends.py:430] Dynamo bytecode transform time: 6.44 s +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e396222f44/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:37 [backends.py:430] Dynamo bytecode transform time: 6.52 s +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.503 s +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.428 s +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.522 s +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.542 s +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:54:43 [monitor.py:33] torch.compile takes 6.40 s in total +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:54:43 [monitor.py:33] torch.compile takes 6.36 s in total +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:54:43 [monitor.py:33] torch.compile takes 6.44 s in total +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:54:43 [monitor.py:33] torch.compile takes 6.52 s in total +INFO 07-09 20:54:45 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 07-09 20:54:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x +INFO 07-09 20:54:45 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 20:54:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 20:54:45 
[kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 20:54:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 20:54:45 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens +INFO 07-09 20:54:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x +(VllmWorker rank=3 pid=1000249) INFO 07-09 20:55:11 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.44 GiB +(VllmWorker rank=2 pid=1000248) INFO 07-09 20:55:11 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.44 GiB +(VllmWorker rank=1 pid=1000247) INFO 07-09 20:55:11 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.44 GiB +(VllmWorker rank=0 pid=1000246) INFO 07-09 20:55:11 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.44 GiB +INFO 07-09 20:55:11 [core.py:159] init engine (profile, create kv cache, warmup model) took 40.76 seconds +INFO 07-09 20:55:12 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 20:58:45 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 20:58:45 [__init__.py:239] Automatically detected platform cuda. | Task |Version| Metric |Value | |Stderr| |------------------|------:|---------------------|-----:|---|-----:| -|all | |math_pass@1:1_samples|0.8765|± |0.0300| -| | |sem |0.7081|± |0.0214| -|mm\|arc_challenge\|0| 0|sem |0.9377|± |0.0135| -|mm\|arc_easy\|0 | 0|sem |0.9820|± |0.0046| -|mm\|commonsenseqa\|0| 0|sem |0.8587|± |0.0207| +|all | |math_pass@1:1_samples|0.6893|± |0.0440| +| | |sem |0.5564|± |0.0302| +|mm\|arc_challenge\|0| 0|sem |0.7259|± |0.0249| +|mm\|arc_easy\|0 | 0|sem |0.7796|± |0.0144| +|mm\|commonsenseqa\|0| 0|sem |0.6572|± |0.0283| |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | -|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8594|± |0.0146| -|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8936|± |0.0455| -|mm\|truthfulqa\|0 | 0|sem |0.7619|± |0.0468| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.6977|± |0.0193| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.6809|± |0.0687| +|mm\|truthfulqa\|0 | 0|sem |0.6190|± |0.0533| diff --git a/merge_llama/logs/llama_linear_5.log b/merge_llama/logs/llama_linear_5.log index f109b3c80d6f5be5ab31ae34bd2525c181ee9a43..e5e697a5d648e595545f0e38827919fb8c15218b 100644 --- a/merge_llama/logs/llama_linear_5.log +++ b/merge_llama/logs/llama_linear_5.log @@ -1,100 +1,100 @@ -INFO 07-08 23:28:44 [__init__.py:239] Automatically detected platform cuda. -INFO 07-08 23:28:53 [config.py:717] This model supports multiple tasks: {'classify', 'embed', 'score', 'generate', 'reward'}. Defaulting to 'generate'. -INFO 07-08 23:28:53 [config.py:1770] Defaulting to use mp for distributed inference -INFO 07-08 23:28:53 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
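[Unlike the llama_linear_1 run, this llama_linear_3 run reuses vLLM's torch.compile cache ("Directly load the compiled graph(s) ... took ~4.5 s"), cutting total compile time from ~31 s to ~6.5 s and engine init from ~80 s to 40.76 s. A small illustrative script (hypothetical helper, not part of the repo) to pull those timings out of any of these logs for comparison:]

    import re
    import sys

    # Print the torch.compile and init-engine timing lines from a vLLM log.
    pat = re.compile(r"torch\.compile takes [\d.]+ s in total"
                     r"|init engine .* took [\d.]+ seconds")
    with open(sys.argv[1]) as f:
        for line in f:
            if (m := pat.search(line)):
                print(m.group(0))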
-INFO 07-08 23:28:55 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_linear_5', speculative_config=None, tokenizer='./merged2/llama_linear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_linear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} -WARNING 07-08 23:28:55 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
-INFO 07-08 23:28:55 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_8163ea60'), local_subscribe_addr='ipc:///tmp/6218bc7b-5481-4a22-b4a4-d891d8bd081c', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:28:55 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=506017) INFO 07-08 23:28:55 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d99d9c0a'), local_subscribe_addr='ipc:///tmp/c1f480f0-375f-4a68-971f-5b44b27b7299', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:28:55 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -WARNING 07-08 23:28:55 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=506016) INFO 07-08 23:28:55 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_10ec160e'), local_subscribe_addr='ipc:///tmp/97c49779-d44c-4184-a5ef-c37e705321a2', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:28:55 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=506018) INFO 07-08 23:28:55 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d44484e4'), local_subscribe_addr='ipc:///tmp/b579de5d-5bc1-4746-88c1-910d88f19e66', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=506019) INFO 07-08 23:28:55 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e28ddcc5'), local_subscribe_addr='ipc:///tmp/503d1ad8-1bb8-43de-91df-c286a5dd6249', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:02 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:02 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:02 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:02 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:02 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:02 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:02 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:02 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=506018) WARNING 07-08 23:29:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=3 pid=506019) WARNING 07-08 23:29:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=0 pid=506016) WARNING 07-08 23:29:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=1 pid=506017) WARNING 07-08 23:29:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_40b4c6bb'), local_subscribe_addr='ipc:///tmp/3b06376f-30ab-4059-9b60-6e0c3eb64249', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:03 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:03 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:03 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:03 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:03 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=506018) WARNING 07-08 23:29:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:03 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:03 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=506016) WARNING 07-08 23:29:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=3 pid=506019) WARNING 07-08 23:29:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:03 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=1 pid=506017) WARNING 07-08 23:29:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:03 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_5... -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:03 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_5... -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:03 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_5... -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:03 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_5... 
-(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:19 [loader.py:458] Loading weights took 16.20 seconds -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:19 [loader.py:458] Loading weights took 16.26 seconds -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:19 [loader.py:458] Loading weights took 16.23 seconds -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:19 [loader.py:458] Loading weights took 16.24 seconds -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:19 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.416438 seconds -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:19 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.489719 seconds -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:19 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.499811 seconds -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:19 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.496381 seconds -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f8b4e5ecce/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:26 [backends.py:430] Dynamo bytecode transform time: 6.59 s -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f8b4e5ecce/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:26 [backends.py:430] Dynamo bytecode transform time: 6.59 s -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f8b4e5ecce/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f8b4e5ecce/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:26 [backends.py:430] Dynamo bytecode transform time: 6.59 s -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:26 [backends.py:430] Dynamo bytecode transform time: 6.59 s -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:29 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:29 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:29 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:29 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=506019) INFO 07-08 23:29:51 [backends.py:148] Compiling a graph for general shape takes 24.18 s -(VllmWorker rank=2 pid=506018) INFO 07-08 23:29:51 [backends.py:148] Compiling a graph for general shape takes 24.30 s -(VllmWorker rank=0 pid=506016) INFO 07-08 23:29:51 [backends.py:148] Compiling a graph for general shape takes 24.38 s -(VllmWorker rank=1 pid=506017) INFO 07-08 23:29:51 [backends.py:148] Compiling a graph for general shape takes 24.39 s -(VllmWorker rank=3 pid=506019) INFO 07-08 23:30:05 [monitor.py:33] torch.compile takes 30.77 s in total -(VllmWorker rank=2 pid=506018) INFO 07-08 23:30:05 [monitor.py:33] torch.compile takes 30.89 s in total -(VllmWorker rank=1 pid=506017) INFO 07-08 23:30:05 [monitor.py:33] torch.compile takes 30.98 s in total -(VllmWorker rank=0 pid=506016) INFO 07-08 23:30:05 [monitor.py:33] torch.compile takes 30.97 s in total -INFO 07-08 23:30:06 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 07-08 23:30:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-08 23:30:06 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:30:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:30:06 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:30:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:30:06 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-08 23:30:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=1 pid=506017) INFO 07-08 23:30:37 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 2.44 GiB -(VllmWorker rank=2 pid=506018) INFO 07-08 23:30:37 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 2.44 GiB -(VllmWorker rank=3 pid=506019) INFO 07-08 23:30:37 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 2.44 GiB -(VllmWorker rank=0 pid=506016) INFO 07-08 23:30:37 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 2.44 GiB -INFO 07-08 23:30:37 [core.py:159] init engine (profile, create kv cache, warmup model) took 77.57 seconds -INFO 07-08 23:30:37 [core_client.py:439] Core engine process 0 ready. -INFO 07-08 23:41:56 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-08 23:41:56 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:58:44 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:58:52 [config.py:717] This model supports multiple tasks: {'reward', 'embed', 'classify', 'generate', 'score'}. Defaulting to 'generate'. +INFO 07-09 20:58:53 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:58:53 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 20:58:54 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_linear_5', speculative_config=None, tokenizer='./merged_llama/llama_linear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_linear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:58:54 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 20:58:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_56b477aa'), local_subscribe_addr='ipc:///tmp/9ced3364-9e20-4602-8122-db8e8c997603', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:58:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:58:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_181a7847'), local_subscribe_addr='ipc:///tmp/8b71d46d-ec35-4a73-9588-b10082506614', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:58:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:58:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0436d177'), local_subscribe_addr='ipc:///tmp/59f7444c-4272-4c42-a3d4-cca389dd127c', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:58:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:58:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:58:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5432e951'), local_subscribe_addr='ipc:///tmp/c7f54a9c-25a6-4622-936e-af6d5cdc6783', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:58:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cff0ca80'), local_subscribe_addr='ipc:///tmp/0ae039e2-5739-4f05-b80f-9aae8f54936c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:58:55 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:58:55 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:58:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:58:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:58:56 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:58:56 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:58:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:58:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=1003902) WARNING 07-09 20:58:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=1003903) WARNING 07-09 20:58:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=1003901) WARNING 07-09 20:58:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=1003898) WARNING 07-09 20:58:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:58:56 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_d12bdf91'), local_subscribe_addr='ipc:///tmp/59ca861d-3140-48a7-a337-12b001d8ff41', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:58:56 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:58:56 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:58:56 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:58:56 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:58:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:58:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:58:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:58:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=1003903) WARNING 07-09 20:58:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=1003902) WARNING 07-09 20:58:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=1003898) WARNING 07-09 20:58:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=1003901) WARNING 07-09 20:58:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:58:56 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_5... +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:58:56 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_5... +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:58:56 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_5... +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:58:56 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_5... 
+(VllmWorker rank=0 pid=1003898) INFO 07-09 20:59:12 [loader.py:458] Loading weights took 15.37 seconds +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:59:12 [loader.py:458] Loading weights took 15.57 seconds +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:59:12 [loader.py:458] Loading weights took 15.57 seconds +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:59:12 [loader.py:458] Loading weights took 15.53 seconds +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:59:12 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.635867 seconds +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:59:12 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.802255 seconds +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:59:12 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.802081 seconds +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:59:12 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.808091 seconds +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:59:19 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1b0a7b72c1/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:59:19 [backends.py:430] Dynamo bytecode transform time: 6.59 s +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:59:19 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1b0a7b72c1/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:59:19 [backends.py:430] Dynamo bytecode transform time: 6.65 s +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:59:19 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1b0a7b72c1/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:59:19 [backends.py:430] Dynamo bytecode transform time: 6.78 s +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:59:19 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1b0a7b72c1/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:59:19 [backends.py:430] Dynamo bytecode transform time: 6.81 s +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:59:22 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:59:22 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:59:22 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:59:23 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:59:45 [backends.py:148] Compiling a graph for general shape takes 25.66 s +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:59:45 [backends.py:148] Compiling a graph for general shape takes 25.84 s +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:59:46 [backends.py:148] Compiling a graph for general shape takes 26.00 s +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:59:46 [backends.py:148] Compiling a graph for general shape takes 26.39 s +(VllmWorker rank=3 pid=1003903) INFO 07-09 20:59:59 [monitor.py:33] torch.compile takes 32.43 s in total +(VllmWorker rank=0 pid=1003898) INFO 07-09 20:59:59 [monitor.py:33] torch.compile takes 32.78 s in total +(VllmWorker rank=2 pid=1003902) INFO 07-09 20:59:59 [monitor.py:33] torch.compile takes 32.32 s in total +(VllmWorker rank=1 pid=1003901) INFO 07-09 20:59:59 [monitor.py:33] torch.compile takes 33.19 s in total +INFO 07-09 21:00:01 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 07-09 21:00:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x +INFO 07-09 21:00:01 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 21:00:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 21:00:01 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 21:00:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 21:00:01 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens +INFO 07-09 21:00:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x +(VllmWorker rank=1 pid=1003901) INFO 07-09 21:00:34 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB +(VllmWorker rank=2 pid=1003902) INFO 07-09 21:00:34 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB +(VllmWorker rank=3 pid=1003903) INFO 07-09 21:00:34 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB +(VllmWorker rank=0 pid=1003898) INFO 07-09 21:00:34 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB +INFO 07-09 21:00:34 [core.py:159] init engine (profile, create kv cache, warmup model) took 82.08 seconds +INFO 07-09 21:00:35 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 21:04:06 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 21:04:06 [__init__.py:239] Automatically detected platform cuda. | Task |Version| Metric |Value | |Stderr| |------------------|------:|---------------------|-----:|---|-----:| -|all | |math_pass@1:1_samples|0.7230|± |0.0410| -| | |sem |0.7022|± |0.0220| -|mm\|arc_challenge\|0| 0|sem |0.9346|± |0.0138| -|mm\|arc_easy\|0 | 0|sem |0.9749|± |0.0054| -|mm\|commonsenseqa\|0| 0|sem |0.8516|± |0.0212| +|all | |math_pass@1:1_samples|0.8537|± |0.0309| +| | |sem |0.6363|± |0.0266| +|mm\|arc_challenge\|0| 0|sem |0.8692|± |0.0189| +|mm\|arc_easy\|0 | 0|sem |0.9281|± |0.0089| +|mm\|commonsenseqa\|0| 0|sem |0.7173|± |0.0268| |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | -|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.6801|± |0.0196| -|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7660|± |0.0624| -|mm\|truthfulqa\|0 | 0|sem |0.7500|± |0.0475| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8137|± |0.0163| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8936|± |0.0455| +|mm\|truthfulqa\|0 | 0|sem |0.6667|± |0.0517| diff --git a/merge_llama/logs/llama_linear_7.log b/merge_llama/logs/llama_linear_7.log index 92ea28cbcc80060794aec1c61d26c3e8dda7887a..7bfaa4b0670d00390c998e6264f2d9f57c2f1746 100644 --- a/merge_llama/logs/llama_linear_7.log +++ b/merge_llama/logs/llama_linear_7.log @@ -1,100 +1,100 @@ -INFO 07-08 23:41:55 [__init__.py:239] Automatically detected platform cuda. -INFO 07-08 23:42:03 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'classify', 'reward', 'embed'}. Defaulting to 'generate'. -INFO 07-08 23:42:04 [config.py:1770] Defaulting to use mp for distributed inference -INFO 07-08 23:42:04 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-08 23:42:05 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_linear_7', speculative_config=None, tokenizer='./merged2/llama_linear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_linear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} -WARNING 07-08 23:42:05 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
-INFO 07-08 23:42:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_59a1f688'), local_subscribe_addr='ipc:///tmp/8c64f480-062e-422d-abb8-6970f276d987', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:42:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0e61d2b4'), local_subscribe_addr='ipc:///tmp/16c5bd5c-da7c-4bee-ae16-224880eb754f', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:42:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_77b4c107'), local_subscribe_addr='ipc:///tmp/1f46e015-7baa-46dd-b084-843beda7543d', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:42:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -WARNING 07-08 23:42:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_89f5a5d4'), local_subscribe_addr='ipc:///tmp/6923faf8-6d57-4996-929c-db9e9ca5de89', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_505503ac'), local_subscribe_addr='ipc:///tmp/21b5800f-4658-433c-bcaf-8250629c769f', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:12 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:12 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:12 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:12 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:12 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:12 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:12 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:12 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=512040) WARNING 07-08 23:42:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=3 pid=512041) WARNING 07-08 23:42:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=1 pid=512035) WARNING 07-08 23:42:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=512034) WARNING 07-08 23:42:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:13 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_a8d6a061'), local_subscribe_addr='ipc:///tmp/7932f9f6-dd0b-4c13-8a64-c5f02f25fbd0', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:13 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:13 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:13 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:13 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:13 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:13 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=512040) WARNING 07-08 23:42:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=3 pid=512041) WARNING 07-08 23:42:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:13 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:13 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=512034) WARNING 07-08 23:42:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=512035) WARNING 07-08 23:42:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:13 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_7... -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:13 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_7... -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:13 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_7... -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:13 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_7... 
-(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:28 [loader.py:458] Loading weights took 14.99 seconds -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:28 [loader.py:458] Loading weights took 15.15 seconds -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:28 [loader.py:458] Loading weights took 15.10 seconds -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:28 [loader.py:458] Loading weights took 15.10 seconds -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:28 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.208618 seconds -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:29 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.358590 seconds -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:29 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.361096 seconds -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:29 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.364693 seconds -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:35 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6fb082dd12/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:35 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6fb082dd12/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:35 [backends.py:430] Dynamo bytecode transform time: 6.36 s -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:35 [backends.py:430] Dynamo bytecode transform time: 6.36 s -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:35 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6fb082dd12/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:35 [backends.py:430] Dynamo bytecode transform time: 6.43 s -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:35 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6fb082dd12/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:35 [backends.py:430] Dynamo bytecode transform time: 6.44 s -(VllmWorker rank=3 pid=512041) INFO 07-08 23:42:38 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=2 pid=512040) INFO 07-08 23:42:38 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=512035) INFO 07-08 23:42:39 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=512034) INFO 07-08 23:42:39 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=512041) INFO 07-08 23:43:01 [backends.py:148] Compiling a graph for general shape takes 25.05 s -(VllmWorker rank=2 pid=512040) INFO 07-08 23:43:01 [backends.py:148] Compiling a graph for general shape takes 25.22 s -(VllmWorker rank=0 pid=512034) INFO 07-08 23:43:01 [backends.py:148] Compiling a graph for general shape takes 25.45 s -(VllmWorker rank=1 pid=512035) INFO 07-08 23:43:02 [backends.py:148] Compiling a graph for general shape takes 25.51 s -(VllmWorker rank=3 pid=512041) INFO 07-08 23:43:15 [monitor.py:33] torch.compile takes 31.41 s in total -(VllmWorker rank=1 pid=512035) INFO 07-08 23:43:15 [monitor.py:33] torch.compile takes 31.94 s in total -(VllmWorker rank=0 pid=512034) INFO 07-08 23:43:15 [monitor.py:33] torch.compile takes 31.89 s in total -(VllmWorker rank=2 pid=512040) INFO 07-08 23:43:15 [monitor.py:33] torch.compile takes 31.58 s in total -INFO 07-08 23:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 07-08 23:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-08 23:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-08 23:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=3 pid=512041) INFO 07-08 23:43:56 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB -(VllmWorker rank=2 pid=512040) INFO 07-08 23:43:56 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB -(VllmWorker rank=1 pid=512035) INFO 07-08 23:43:56 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB -(VllmWorker rank=0 pid=512034) INFO 07-08 23:43:56 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB -INFO 07-08 23:43:57 [core.py:159] init engine (profile, create kv cache, warmup model) took 87.82 seconds -INFO 07-08 23:43:57 [core_client.py:439] Core engine process 0 ready. -INFO 07-08 23:55:30 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-08 23:55:30 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 21:04:04 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 21:04:13 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'reward', 'generate', 'classify'}. Defaulting to 'generate'. +INFO 07-09 21:04:13 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 21:04:13 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 21:04:15 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_linear_7', speculative_config=None, tokenizer='./merged_llama/llama_linear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_linear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 21:04:15 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 21:04:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e02672fb'), local_subscribe_addr='ipc:///tmp/b980da30-69a1-4e5d-9ffc-3533907f3936', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 21:04:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_350430bb'), local_subscribe_addr='ipc:///tmp/bdbe4539-847d-42fe-ba3b-397fbb7d3701', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 21:04:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a4d17fb7'), local_subscribe_addr='ipc:///tmp/e5a8e83a-733d-453e-bf54-07cc07928428', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 21:04:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 21:04:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2f788a0e'), local_subscribe_addr='ipc:///tmp/fc6c23bd-6684-41b2-8b6c-801e883d1ba0', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_aa3098ea'), local_subscribe_addr='ipc:///tmp/51390ca1-06f8-49a7-864c-ed73954c6b08', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:16 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:16 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:16 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:16 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:16 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:16 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:16 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:16 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=1008293) WARNING 07-09 21:04:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=1008292) WARNING 07-09 21:04:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=1008290) WARNING 07-09 21:04:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=1008291) WARNING 07-09 21:04:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:17 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_6ec7f336'), local_subscribe_addr='ipc:///tmp/7c3f953b-b0bd-4a63-a796-c5b71041b020', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:17 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:17 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:17 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:17 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=1008292) WARNING 07-09 21:04:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=1008293) WARNING 07-09 21:04:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=1008290) WARNING 07-09 21:04:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=1008291) WARNING 07-09 21:04:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:17 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_7... +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:17 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_7... +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:17 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_7... +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:17 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_7... 
+(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:34 [loader.py:458] Loading weights took 17.21 seconds +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:34 [loader.py:458] Loading weights took 17.32 seconds +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:34 [loader.py:458] Loading weights took 17.32 seconds +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:34 [loader.py:458] Loading weights took 17.29 seconds +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:34 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.469392 seconds +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:35 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.568899 seconds +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:35 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.570475 seconds +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:35 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 17.570519 seconds +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:41 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3fceecbe52/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:41 [backends.py:430] Dynamo bytecode transform time: 6.33 s +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:41 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3fceecbe52/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:41 [backends.py:430] Dynamo bytecode transform time: 6.37 s +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:41 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3fceecbe52/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:41 [backends.py:430] Dynamo bytecode transform time: 6.39 s +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:41 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3fceecbe52/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:41 [backends.py:430] Dynamo bytecode transform time: 6.45 s +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:04:44 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:04:44 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:04:44 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:04:44 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:05:06 [backends.py:148] Compiling a graph for general shape takes 24.23 s +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:05:06 [backends.py:148] Compiling a graph for general shape takes 24.46 s +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:05:06 [backends.py:148] Compiling a graph for general shape takes 24.49 s +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:05:06 [backends.py:148] Compiling a graph for general shape takes 24.55 s +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:05:19 [monitor.py:33] torch.compile takes 30.94 s in total +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:05:19 [monitor.py:33] torch.compile takes 30.82 s in total +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:05:19 [monitor.py:33] torch.compile takes 30.94 s in total +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:05:19 [monitor.py:33] torch.compile takes 30.56 s in total +INFO 07-09 21:05:21 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 07-09 21:05:21 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x +INFO 07-09 21:05:21 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 21:05:21 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 21:05:21 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 21:05:21 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 21:05:21 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens +INFO 07-09 21:05:21 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x +(VllmWorker rank=1 pid=1008291) INFO 07-09 21:05:47 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +(VllmWorker rank=0 pid=1008290) INFO 07-09 21:05:47 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +(VllmWorker rank=3 pid=1008293) INFO 07-09 21:05:47 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +(VllmWorker rank=2 pid=1008292) INFO 07-09 21:05:47 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB +INFO 07-09 21:05:47 [core.py:159] init engine (profile, create kv cache, warmup model) took 72.87 seconds +INFO 07-09 21:05:48 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 21:09:25 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 21:09:25 [__init__.py:239] Automatically detected platform cuda. | Task |Version| Metric |Value | |Stderr| |------------------|------:|---------------------|-----:|---|-----:| -|all | |math_pass@1:1_samples|0.8688|± |0.0264| -| | |sem |0.6724|± |0.0243| -|mm\|arc_challenge\|0| 0|sem |0.9065|± |0.0163| -|mm\|arc_easy\|0 | 0|sem |0.9653|± |0.0063| -|mm\|commonsenseqa\|0| 0|sem |0.7880|± |0.0243| +|all | |math_pass@1:1_samples|0.8273|± |0.0317| +| | |sem |0.6858|± |0.0235| +|mm\|arc_challenge\|0| 0|sem |0.9190|± |0.0153| +|mm\|arc_easy\|0 | 0|sem |0.9485|± |0.0077| +|mm\|commonsenseqa\|0| 0|sem |0.8233|± |0.0227| |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | -|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8014|± |0.0167| -|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9362|± |0.0360| -|mm\|truthfulqa\|0 | 0|sem |0.7024|± |0.0502| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7610|± |0.0179| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8936|± |0.0455| +|mm\|truthfulqa\|0 | 0|sem |0.7381|± |0.0483| diff --git a/merge_llama/logs/llama_linear_9.log b/merge_llama/logs/llama_linear_9.log index 42f2b06b396fbefee2efb76f500f3972750adb1a..1388d30fee34ec7ba11b2536f84e609d1ae4815c 100644 --- a/merge_llama/logs/llama_linear_9.log +++ b/merge_llama/logs/llama_linear_9.log @@ -1,100 +1,100 @@ -INFO 07-08 23:55:29 [__init__.py:239] Automatically detected platform cuda. -INFO 07-08 23:55:38 [config.py:717] This model supports multiple tasks: {'classify', 'score', 'embed', 'generate', 'reward'}. Defaulting to 'generate'. -INFO 07-08 23:55:38 [config.py:1770] Defaulting to use mp for distributed inference -INFO 07-08 23:55:38 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-08 23:55:40 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_linear_9', speculative_config=None, tokenizer='./merged2/llama_linear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_linear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} -WARNING 07-08 23:55:40 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
-INFO 07-08 23:55:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_9d6429a1'), local_subscribe_addr='ipc:///tmp/070ba2a8-bb63-456f-a426-f2ee20ab5a9c', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:55:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cd00e2df'), local_subscribe_addr='ipc:///tmp/3cd04d80-7be2-4871-9f12-e6c6f69edd3a', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:55:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8d1c1579'), local_subscribe_addr='ipc:///tmp/140eb04e-beed-4f92-91dc-bb292bf2e035', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-08 23:55:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -WARNING 07-08 23:55:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b49b2eef'), local_subscribe_addr='ipc:///tmp/897fbdba-19ed-49dd-9c65-014420376c3c', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8816473d'), local_subscribe_addr='ipc:///tmp/eed30710-9883-46da-93d6-0158a43af1de', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:42 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:42 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:42 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:42 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:42 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:42 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:42 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:42 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=518083) WARNING 07-08 23:55:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=2 pid=518082) WARNING 07-08 23:55:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=1 pid=518081) WARNING 07-08 23:55:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=518080) WARNING 07-08 23:55:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_6dfa17bf'), local_subscribe_addr='ipc:///tmp/9c0983eb-2fb5-4da5-9fa5-dbc959b32a0e', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:43 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:43 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:43 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:43 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:43 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=3 pid=518083) WARNING 07-08 23:55:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:43 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=518082) WARNING 07-08 23:55:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:43 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:43 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=518080) WARNING 07-08 23:55:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=518081) WARNING 07-08 23:55:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:43 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_9... -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:43 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_9... -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:43 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_9... -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:43 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_linear_9... 
-(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:47 [loader.py:458] Loading weights took 4.08 seconds -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:47 [loader.py:458] Loading weights took 4.11 seconds -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:47 [loader.py:458] Loading weights took 4.14 seconds -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:47 [loader.py:458] Loading weights took 4.12 seconds -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 4.293635 seconds -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 4.364151 seconds -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 4.377319 seconds -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 4.368003 seconds -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:54 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6c580e8a9e/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:54 [backends.py:430] Dynamo bytecode transform time: 6.34 s -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:54 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6c580e8a9e/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:54 [backends.py:430] Dynamo bytecode transform time: 6.37 s -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:54 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6c580e8a9e/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:54 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6c580e8a9e/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:54 [backends.py:430] Dynamo bytecode transform time: 6.53 s -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:54 [backends.py:430] Dynamo bytecode transform time: 6.53 s -(VllmWorker rank=3 pid=518083) INFO 07-08 23:55:57 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=2 pid=518082) INFO 07-08 23:55:58 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=518081) INFO 07-08 23:55:58 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=518080) INFO 07-08 23:55:58 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=518083) INFO 07-08 23:56:19 [backends.py:148] Compiling a graph for general shape takes 24.17 s -(VllmWorker rank=2 pid=518082) INFO 07-08 23:56:19 [backends.py:148] Compiling a graph for general shape takes 24.12 s -(VllmWorker rank=1 pid=518081) INFO 07-08 23:56:20 [backends.py:148] Compiling a graph for general shape takes 24.80 s -(VllmWorker rank=0 pid=518080) INFO 07-08 23:56:20 [backends.py:148] Compiling a graph for general shape takes 25.28 s -(VllmWorker rank=3 pid=518083) INFO 07-08 23:56:33 [monitor.py:33] torch.compile takes 30.52 s in total -(VllmWorker rank=1 pid=518081) INFO 07-08 23:56:33 [monitor.py:33] torch.compile takes 31.33 s in total -(VllmWorker rank=0 pid=518080) INFO 07-08 23:56:33 [monitor.py:33] torch.compile takes 31.80 s in total -(VllmWorker rank=2 pid=518082) INFO 07-08 23:56:33 [monitor.py:33] torch.compile takes 30.49 s in total -INFO 07-08 23:56:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 07-08 
23:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-08 23:56:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:56:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-08 23:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-08 23:56:35 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-08 23:56:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=3 pid=518083) INFO 07-08 23:57:19 [gpu_model_runner.py:1686] Graph capturing finished in 44 secs, took 2.44 GiB -(VllmWorker rank=2 pid=518082) INFO 07-08 23:57:19 [gpu_model_runner.py:1686] Graph capturing finished in 45 secs, took 2.44 GiB -(VllmWorker rank=0 pid=518080) INFO 07-08 23:57:20 [gpu_model_runner.py:1686] Graph capturing finished in 45 secs, took 2.44 GiB -(VllmWorker rank=1 pid=518081) INFO 07-08 23:57:20 [gpu_model_runner.py:1686] Graph capturing finished in 45 secs, took 2.44 GiB -INFO 07-08 23:57:20 [core.py:159] init engine (profile, create kv cache, warmup model) took 91.86 seconds -INFO 07-08 23:57:20 [core_client.py:439] Core engine process 0 ready. -INFO 07-09 00:09:02 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-09 00:09:02 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 21:09:24 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 21:09:33 [config.py:717] This model supports multiple tasks: {'classify', 'score', 'embed', 'generate', 'reward'}. Defaulting to 'generate'. +INFO 07-09 21:09:33 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 21:09:33 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 21:09:35 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_linear_9', speculative_config=None, tokenizer='./merged_llama/llama_linear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_linear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 21:09:35 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
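The config dump above describes a plain offline vLLM V1 engine: float16 weights, max_seq_len=2048, tensor_parallel_size=4, prefix caching and chunked prefill on. As a sketch only (the log does not show how the eval harness actually constructs the engine), the equivalent vLLM Python API call would look roughly like this:

```python
from vllm import LLM, SamplingParams

# Sketch under the assumption that the harness uses vLLM's offline LLM API;
# argument values are mirrored from the config dump in the log above.
llm = LLM(
    model="./merged_llama/llama_linear_9",
    dtype="float16",             # dtype=torch.float16
    max_model_len=2048,          # max_seq_len=2048
    tensor_parallel_size=4,      # tensor_parallel_size=4
    enable_prefix_caching=True,  # enable_prefix_caching=True
)

outputs = llm.generate(["2 + 2 ="], SamplingParams(max_tokens=16))
print(outputs[0].outputs[0].text)
```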
+INFO 07-09 21:09:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_bc08c946'), local_subscribe_addr='ipc:///tmp/895ea9ed-cc1a-4e5d-8aa7-cdad5daa5f06', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 21:09:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d1a88baf'), local_subscribe_addr='ipc:///tmp/b4b967aa-6bfe-4bec-b448-0283b493e82c', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 21:09:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 21:09:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7435c6ea'), local_subscribe_addr='ipc:///tmp/a74abd0f-2355-4999-833a-dfa9dd7e05b3', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 21:09:35 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d755cf16'), local_subscribe_addr='ipc:///tmp/b03d14a8-17c4-48d1-b453-d9cb060eda46', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_252f303d'), local_subscribe_addr='ipc:///tmp/7d28fddd-a9f5-4edf-9ee6-eab548ec0e69', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=1012708) WARNING 07-09 21:09:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=1012707) WARNING 07-09 21:09:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=1012705) WARNING 07-09 21:09:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=1012706) WARNING 07-09 21:09:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_c88b155f'), local_subscribe_addr='ipc:///tmp/4aa7e9c0-cd04-4f99-96b5-b3ae69e0f9b0', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:37 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:37 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:37 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:37 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=1012708) WARNING 07-09 21:09:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=1012707) WARNING 07-09 21:09:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=1012705) WARNING 07-09 21:09:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=1012706) WARNING 07-09 21:09:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_9... +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_9... +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_9... +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:37 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_linear_9... 
+(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:54 [loader.py:458] Loading weights took 16.64 seconds +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:54 [loader.py:458] Loading weights took 16.72 seconds +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:54 [loader.py:458] Loading weights took 16.74 seconds +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:54 [loader.py:458] Loading weights took 16.77 seconds +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:09:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.873589 seconds +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:09:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.990715 seconds +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:09:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.982990 seconds +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:09:54 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.994223 seconds +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:10:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fed46e6435/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:10:01 [backends.py:430] Dynamo bytecode transform time: 6.51 s +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:10:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fed46e6435/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:10:01 [backends.py:430] Dynamo bytecode transform time: 6.55 s +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:10:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fed46e6435/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:10:01 [backends.py:430] Dynamo bytecode transform time: 6.63 s +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:10:01 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fed46e6435/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:10:01 [backends.py:430] Dynamo bytecode transform time: 6.70 s +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:10:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:10:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:10:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:10:04 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:10:26 [backends.py:148] Compiling a graph for general shape takes 24.48 s +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:10:26 [backends.py:148] Compiling a graph for general shape takes 24.66 s +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:10:26 [backends.py:148] Compiling a graph for general shape takes 24.59 s +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:10:27 [backends.py:148] Compiling a graph for general shape takes 25.66 s +(VllmWorker rank=0 pid=1012705) INFO 07-09 21:10:41 [monitor.py:33] torch.compile takes 31.03 s in total +(VllmWorker rank=3 pid=1012708) INFO 07-09 21:10:41 [monitor.py:33] torch.compile takes 31.17 s in total +(VllmWorker rank=1 pid=1012706) INFO 07-09 21:10:41 [monitor.py:33] torch.compile takes 32.35 s in total +(VllmWorker rank=2 pid=1012707) INFO 07-09 21:10:41 [monitor.py:33] torch.compile takes 31.22 s in total +INFO 07-09 21:10:42 [kv_cache_utils.py:634] GPU KV cache 
size: 2,028,880 tokens
+INFO 07-09 21:10:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 21:10:42 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 21:10:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 21:10:42 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 21:10:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 21:10:42 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 21:10:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=3 pid=1012708) INFO 07-09 21:11:26 [gpu_model_runner.py:1686] Graph capturing finished in 44 secs, took 2.44 GiB
+(VllmWorker rank=2 pid=1012707) INFO 07-09 21:11:27 [gpu_model_runner.py:1686] Graph capturing finished in 44 secs, took 2.44 GiB
+(VllmWorker rank=0 pid=1012705) INFO 07-09 21:11:27 [gpu_model_runner.py:1686] Graph capturing finished in 44 secs, took 2.44 GiB
+(VllmWorker rank=1 pid=1012706) INFO 07-09 21:11:27 [gpu_model_runner.py:1686] Graph capturing finished in 45 secs, took 2.44 GiB
+INFO 07-09 21:11:27 [core.py:159] init engine (profile, create kv cache, warmup model) took 92.78 seconds
+INFO 07-09 21:11:27 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 21:15:28 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 21:15:28 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
-|all | |math_pass@1:1_samples|0.6292|± |0.0461|
-| | |sem |0.6029|± |0.0285|
-|mm\|arc_challenge\|0| 0|sem |0.8006|± |0.0223|
-|mm\|arc_easy\|0 | 0|sem |0.8587|± |0.0121|
-|mm\|commonsenseqa\|0| 0|sem |0.7244|± |0.0266|
+|all | |math_pass@1:1_samples|0.8627|± |0.0237|
+| | |sem |0.6932|± |0.0228|
+|mm\|arc_challenge\|0| 0|sem |0.9097|± |0.0160|
+|mm\|arc_easy\|0 | 0|sem |0.9545|± |0.0072|
+|mm\|commonsenseqa\|0| 0|sem |0.8163|± |0.0231|
|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
-|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.6626|± |0.0198|
-|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.5957|± |0.0724|
-|mm\|truthfulqa\|0 | 0|sem |0.6310|± |0.0530|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7680|± |0.0177|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9574|± |0.0298|
+|mm\|truthfulqa\|0 | 0|sem |0.7857|± |0.0450|
diff --git a/merge_llama/logs/llama_ties_1.log b/merge_llama/logs/llama_ties_1.log
index 47ec0df2c29122bd900db2a76c660aa748583c9a..656452c21c73c433bbfc135cd0c6d6b80e380798 100644
--- a/merge_llama/logs/llama_ties_1.log
+++ b/merge_llama/logs/llama_ties_1.log
@@ -1,100 +1,100 @@
-INFO 07-09 00:09:01 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-09 00:09:10 [config.py:717] This model supports multiple tasks: {'embed', 'classify', 'generate', 'score', 'reward'}. Defaulting to 'generate'.
-INFO 07-09 00:09:10 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-09 00:09:10 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
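On reading the ± column in these result tables: for a 0/1 metric such as math_pass@1:1_samples, the standard error is, in the usual case, the binomial estimate sqrt(p * (1 - p) / n), so a row can be sanity-checked when the per-task sample count is known. A sketch (the n below is illustrative, back-solved from the gsm8k row above, not taken from the logs):

```python
import math

def binomial_stderr(p: float, n: int) -> float:
    """Standard error of the mean of n Bernoulli(p) samples."""
    return math.sqrt(p * (1.0 - p) / n)

# Illustrative: accuracy 0.7680 over ~569 prompts gives ~0.0177,
# matching the gsm8k row in the table above.
print(round(binomial_stderr(0.7680, 569), 4))
```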
-INFO 07-09 00:09:12 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_ties_1', speculative_config=None, tokenizer='./merged2/llama_ties_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_ties_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} -WARNING 07-09 00:09:12 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
-INFO 07-09 00:09:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_22cb6533'), local_subscribe_addr='ipc:///tmp/0db87f1a-6e1d-48dd-a6dc-4e7470240703', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-09 00:09:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_66da5f57'), local_subscribe_addr='ipc:///tmp/8265f4e9-35f0-4aaf-8f81-8d20353b028e', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-09 00:09:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -WARNING 07-09 00:09:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_66590f81'), local_subscribe_addr='ipc:///tmp/f0159fc1-5b98-4da5-9f99-e33ac7e89d65', remote_subscribe_addr=None, remote_addr_ipv6=False) -WARNING 07-09 00:09:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_abceaf84'), local_subscribe_addr='ipc:///tmp/306a387d-b16a-4c50-b8ec-7fad48ad5e54', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_759eb040'), local_subscribe_addr='ipc:///tmp/0b015122-0ea1-4060-bf39-6ba74032049d', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:20 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:20 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:20 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:20 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:20 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:20 [utils.py:1055] Found nccl from library libnccl.so.2 -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:20 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:20 [pynccl.py:69] vLLM is using nccl==2.21.5 -(VllmWorker rank=3 pid=524541) WARNING 07-09 00:09:20 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=2 pid=524540) WARNING 07-09 00:09:20 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
-(VllmWorker rank=1 pid=524539) WARNING 07-09 00:09:20 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=524538) WARNING 07-09 00:09:20 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:20 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_0c823e14'), local_subscribe_addr='ipc:///tmp/f6302089-1949-4416-84d2-db87ba3c1718', remote_subscribe_addr=None, remote_addr_ipv6=False) -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:20 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:20 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:20 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:20 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:20 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:20 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=3 pid=524541) WARNING 07-09 00:09:20 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=2 pid=524540) WARNING 07-09 00:09:20 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:20 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=1 pid=524539) WARNING 07-09 00:09:20 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:20 [cuda.py:221] Using Flash Attention backend on V1 engine. -(VllmWorker rank=0 pid=524538) WARNING 07-09 00:09:20 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:20 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_1... -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:20 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_1... -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:20 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_1... -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:20 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_1... 
-(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:36 [loader.py:458] Loading weights took 14.94 seconds -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:36 [loader.py:458] Loading weights took 15.10 seconds -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:36 [loader.py:458] Loading weights took 15.09 seconds -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:36 [loader.py:458] Loading weights took 15.05 seconds -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:36 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.347854 seconds -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:36 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.348878 seconds -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:36 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.347142 seconds -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:36 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.239616 seconds -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/067e9a182b/rank_3_0 for vLLM's torch.compile -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:43 [backends.py:430] Dynamo bytecode transform time: 6.51 s -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/067e9a182b/rank_2_0 for vLLM's torch.compile -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:43 [backends.py:430] Dynamo bytecode transform time: 6.52 s -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/067e9a182b/rank_0_0 for vLLM's torch.compile -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:43 [backends.py:430] Dynamo bytecode transform time: 6.55 s -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/067e9a182b/rank_1_0 for vLLM's torch.compile -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:43 [backends.py:430] Dynamo bytecode transform time: 6.58 s -(VllmWorker rank=2 pid=524540) INFO 07-09 00:09:46 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=524541) INFO 07-09 00:09:46 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=1 pid=524539) INFO 07-09 00:09:46 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=0 pid=524538) INFO 07-09 00:09:46 [backends.py:136] Cache the graph of shape None for later use -(VllmWorker rank=3 pid=524541) INFO 07-09 00:10:08 [backends.py:148] Compiling a graph for general shape takes 24.96 s -(VllmWorker rank=1 pid=524539) INFO 07-09 00:10:08 [backends.py:148] Compiling a graph for general shape takes 25.03 s -(VllmWorker rank=2 pid=524540) INFO 07-09 00:10:08 [backends.py:148] Compiling a graph for general shape takes 25.16 s -(VllmWorker rank=0 pid=524538) INFO 07-09 00:10:09 [backends.py:148] Compiling a graph for general shape takes 25.30 s -(VllmWorker rank=2 pid=524540) INFO 07-09 00:10:22 [monitor.py:33] torch.compile takes 31.69 s in total -(VllmWorker rank=1 pid=524539) INFO 07-09 00:10:22 [monitor.py:33] torch.compile takes 31.61 s in total -(VllmWorker rank=0 pid=524538) INFO 07-09 00:10:22 [monitor.py:33] torch.compile takes 31.86 s in total -(VllmWorker rank=3 pid=524541) INFO 07-09 00:10:22 [monitor.py:33] torch.compile takes 31.47 s in total -INFO 07-09 00:10:23 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens -INFO 
07-09 00:10:23 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x -INFO 07-09 00:10:23 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-09 00:10:23 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-09 00:10:23 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens -INFO 07-09 00:10:23 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x -INFO 07-09 00:10:23 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens -INFO 07-09 00:10:23 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x -(VllmWorker rank=1 pid=524539) INFO 07-09 00:10:53 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -(VllmWorker rank=3 pid=524541) INFO 07-09 00:10:53 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -(VllmWorker rank=2 pid=524540) INFO 07-09 00:10:53 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -(VllmWorker rank=0 pid=524538) INFO 07-09 00:10:53 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB -INFO 07-09 00:10:53 [core.py:159] init engine (profile, create kv cache, warmup model) took 77.23 seconds -INFO 07-09 00:10:54 [core_client.py:439] Core engine process 0 ready. -INFO 07-09 00:22:08 [importing.py:53] Triton module has been replaced with a placeholder. -INFO 07-09 00:22:08 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 17:41:51 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 17:42:00 [config.py:717] This model supports multiple tasks: {'classify', 'score', 'reward', 'embed', 'generate'}. Defaulting to 'generate'. +INFO 07-09 17:42:00 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 17:42:00 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
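Because every re-run rewrites the full 100-line log, the interesting deltas (e.g. init engine 91.86 s → 92.78 s for llama_linear_9) are easy to lose among changed timestamps and PIDs. A small sketch for pulling the headline timing straight out of such logs rather than reading the diff (paths are the ones used in this repo):

```python
import re
from pathlib import Path

# Matches the core.py:159 summary line emitted at the end of engine startup.
INIT_RE = re.compile(
    r"init engine \(profile, create kv cache, warmup model\) took ([\d.]+) seconds"
)

def init_engine_seconds(log_path: str) -> float | None:
    """Return the engine-init time reported in a vLLM log, if present."""
    match = INIT_RE.search(Path(log_path).read_text())
    return float(match.group(1)) if match else None

# e.g. init_engine_seconds("merge_llama/logs/llama_linear_9.log") -> 92.78
```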
+INFO 07-09 17:42:01 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_ties_1', speculative_config=None, tokenizer='./merged_llama/llama_ties_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_ties_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 17:42:01 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 17:42:01 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_733b5f73'), local_subscribe_addr='ipc:///tmp/596ef910-9311-4177-9ead-22a28e16cb68', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 17:42:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a270f7bb'), local_subscribe_addr='ipc:///tmp/46039f78-2801-4e72-9d73-e54d4aeb5102', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 17:42:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cc428eda'), local_subscribe_addr='ipc:///tmp/a2114a88-e646-4041-88ad-ee7a582b85cd', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 17:42:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 17:42:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3e79aef8'), local_subscribe_addr='ipc:///tmp/d33aa0ee-5189-4e0d-be0f-76f190c4c4e5', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6bdff9a6'), local_subscribe_addr='ipc:///tmp/b81feafa-f24f-4dd5-a51e-58658c203c15', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:03 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:03 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:03 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:03 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:03 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:03 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:03 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:03 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=916512) WARNING 07-09 17:42:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=916511) WARNING 07-09 17:42:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=916510) WARNING 07-09 17:42:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=916509) WARNING 07-09 17:42:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_c9c95251'), local_subscribe_addr='ipc:///tmp/32d3d693-9520-4136-b33f-0f89d9e92448', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:04 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:04 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:04 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:04 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=916511) WARNING 07-09 17:42:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=916509) WARNING 07-09 17:42:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=916510) WARNING 07-09 17:42:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=916512) WARNING 07-09 17:42:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:04 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_1... +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:04 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_1... +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:04 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_1... +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:04 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_1... 
+(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:17 [loader.py:458] Loading weights took 13.25 seconds +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:17 [loader.py:458] Loading weights took 13.45 seconds +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:17 [loader.py:458] Loading weights took 13.40 seconds +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:17 [loader.py:458] Loading weights took 13.45 seconds +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:17 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.509470 seconds +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:18 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.670816 seconds +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:18 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.670422 seconds +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:18 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.666300 seconds +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3ed3001f21/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:24 [backends.py:430] Dynamo bytecode transform time: 6.45 s +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3ed3001f21/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:24 [backends.py:430] Dynamo bytecode transform time: 6.46 s +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3ed3001f21/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:24 [backends.py:430] Dynamo bytecode transform time: 6.52 s +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/3ed3001f21/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:24 [backends.py:430] Dynamo bytecode transform time: 6.54 s +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:28 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:28 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:28 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:28 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=916512) INFO 07-09 17:42:49 [backends.py:148] Compiling a graph for general shape takes 24.39 s +(VllmWorker rank=2 pid=916511) INFO 07-09 17:42:50 [backends.py:148] Compiling a graph for general shape takes 24.63 s +(VllmWorker rank=0 pid=916509) INFO 07-09 17:42:50 [backends.py:148] Compiling a graph for general shape takes 24.58 s +(VllmWorker rank=1 pid=916510) INFO 07-09 17:42:50 [backends.py:148] Compiling a graph for general shape takes 24.63 s +(VllmWorker rank=2 pid=916511) INFO 07-09 17:43:03 [monitor.py:33] torch.compile takes 31.09 s in total +(VllmWorker rank=0 pid=916509) INFO 07-09 17:43:03 [monitor.py:33] torch.compile takes 31.12 s in total +(VllmWorker rank=1 pid=916510) INFO 07-09 17:43:03 [monitor.py:33] torch.compile takes 31.16 s in total +(VllmWorker rank=3 pid=916512) INFO 07-09 17:43:03 [monitor.py:33] torch.compile takes 30.84 s in total +INFO 07-09 17:43:04 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 
07-09 17:43:04 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 17:43:04 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 17:43:04 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 17:43:04 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 17:43:04 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 17:43:04 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 17:43:04 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=3 pid=916512) INFO 07-09 17:43:38 [gpu_model_runner.py:1686] Graph capturing finished in 34 secs, took 2.44 GiB
+(VllmWorker rank=2 pid=916511) INFO 07-09 17:43:39 [gpu_model_runner.py:1686] Graph capturing finished in 34 secs, took 2.44 GiB
+(VllmWorker rank=0 pid=916509) INFO 07-09 17:43:39 [gpu_model_runner.py:1686] Graph capturing finished in 34 secs, took 2.44 GiB
+(VllmWorker rank=1 pid=916510) INFO 07-09 17:43:39 [gpu_model_runner.py:1686] Graph capturing finished in 34 secs, took 2.44 GiB
+INFO 07-09 17:43:39 [core.py:159] init engine (profile, create kv cache, warmup model) took 80.89 seconds
+INFO 07-09 17:43:39 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 17:48:58 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 17:48:59 [__init__.py:239] Automatically detected platform cuda.
| Task |Version| Metric |Value | |Stderr|
|------------------|------:|---------------------|-----:|---|-----:|
-|all | |math_pass@1:1_samples|0.7509|± |0.0421|
-| | |sem |0.6841|± |0.0235|
-|mm\|arc_challenge\|0| 0|sem |0.9190|± |0.0153|
-|mm\|arc_easy\|0 | 0|sem |0.9677|± |0.0061|
-|mm\|commonsenseqa\|0| 0|sem |0.8198|± |0.0229|
+|all | |math_pass@1:1_samples|0.7204|± |0.0410|
+| | |sem |0.4566|± |0.0317|
+|mm\|arc_challenge\|0| 0|sem |0.6168|± |0.0272|
+|mm\|arc_easy\|0 | 0|sem |0.7162|± |0.0156|
+|mm\|commonsenseqa\|0| 0|sem |0.5336|± |0.0297|
|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
-|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7996|± |0.0168|
-|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7021|± |0.0674|
-|mm\|truthfulqa\|0 | 0|sem |0.7143|± |0.0496|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.6749|± |0.0197|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7660|± |0.0624|
+|mm\|truthfulqa\|0 | 0|sem |0.4167|± |0.0541|
diff --git a/merge_llama/logs/llama_ties_3.log b/merge_llama/logs/llama_ties_3.log
index 7278e528ccc4ddb8197f14be4633ab4b6d5383d4..00d294534c6f4fd6b31734e3a52f323e325d4779 100644
--- a/merge_llama/logs/llama_ties_3.log
+++ b/merge_llama/logs/llama_ties_3.log
@@ -1,100 +1,100 @@
-INFO 07-09 00:22:07 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-09 00:22:16 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'classify', 'reward', 'embed'}. Defaulting to 'generate'.
-INFO 07-09 00:22:16 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-09 00:22:16 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
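The log names indicate the checkpoints under test are merged Llama models: llama_linear_* presumably a weighted (linear) average of parameter tensors, llama_ties_* a TIES-style merge that additionally trims small deltas and resolves sign conflicts before averaging, with the numeric suffixes presumably indexing merge hyperparameters. As an illustration of the linear case only (a sketch of the technique, not this repo's actual merge pipeline or tooling):

```python
import torch

def linear_merge(
    state_dicts: list[dict[str, torch.Tensor]], weights: list[float]
) -> dict[str, torch.Tensor]:
    """Illustrative linear model merge: a weighted average of parameters.

    Assumes all checkpoints share one architecture and key set; this is a
    sketch of the technique, not the pipeline that produced ./merged_llama.
    """
    assert len(state_dicts) == len(weights)
    assert abs(sum(weights) - 1.0) < 1e-6, "use a convex combination"
    merged = {}
    for key in state_dicts[0]:
        # Accumulate in float32 for stability, then cast back to the
        # checkpoints' original dtype (float16 in these logs).
        acc = sum(w * sd[key].float() for w, sd in zip(weights, state_dicts))
        merged[key] = acc.to(state_dicts[0][key].dtype)
    return merged
```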
-INFO 07-09 00:22:17 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_ties_3', speculative_config=None, tokenizer='./merged2/llama_ties_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_ties_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-09 00:22:17 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-09 00:22:17 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_f6d94697'), local_subscribe_addr='ipc:///tmp/72bda55a-f51b-459c-ae78-0e361ddef361', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:22:18 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-WARNING 07-09 00:22:18 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:18 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_caf45539'), local_subscribe_addr='ipc:///tmp/657ca8fa-d3f7-494b-98be-1a2735014e8c', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:22:18 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:18 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_76ca5672'), local_subscribe_addr='ipc:///tmp/01b24fe7-b147-4b55-9574-02bf4d12067a', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:22:18 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:18 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a60240fa'), local_subscribe_addr='ipc:///tmp/720e0b50-c6ff-4f27-b746-fd8c1e70160f', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:18 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_aab53d73'), local_subscribe_addr='ipc:///tmp/1be93d98-c236-48d0-8e63-26e85724e38f', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:21 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:21 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:21 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:21 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:21 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:21 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:21 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:21 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=530449) WARNING 07-09 00:22:21 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=2 pid=530448) WARNING 07-09 00:22:21 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=530446) WARNING 07-09 00:22:21 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=1 pid=530447) WARNING 07-09 00:22:21 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_61afd87c'), local_subscribe_addr='ipc:///tmp/f360117e-903d-4fdd-b0ba-45158dd46d49', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:21 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:21 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:21 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:21 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:21 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:21 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=530449) WARNING 07-09 00:22:21 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:21 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=530448) WARNING 07-09 00:22:21 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=530446) WARNING 07-09 00:22:21 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:21 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=1 pid=530447) WARNING 07-09 00:22:21 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:21 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_3...
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:21 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_3...
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:21 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_3...
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:21 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_3...
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:36 [loader.py:458] Loading weights took 14.75 seconds
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:36 [loader.py:458] Loading weights took 14.87 seconds
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:36 [loader.py:458] Loading weights took 14.85 seconds
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:36 [loader.py:458] Loading weights took 14.87 seconds
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:36 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.996291 seconds
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:37 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.088520 seconds
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:37 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.091452 seconds
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:37 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.093427 seconds
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c501ef649a/rank_1_0 for vLLM's torch.compile
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:43 [backends.py:430] Dynamo bytecode transform time: 6.41 s
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c501ef649a/rank_2_0 for vLLM's torch.compile
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:43 [backends.py:430] Dynamo bytecode transform time: 6.42 s
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c501ef649a/rank_0_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:43 [backends.py:430] Dynamo bytecode transform time: 6.43 s
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c501ef649a/rank_3_0 for vLLM's torch.compile
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:43 [backends.py:430] Dynamo bytecode transform time: 6.45 s
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:22:46 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:22:46 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:22:46 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:22:46 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:23:08 [backends.py:148] Compiling a graph for general shape takes 24.10 s
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:23:08 [backends.py:148] Compiling a graph for general shape takes 24.25 s
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:23:08 [backends.py:148] Compiling a graph for general shape takes 24.28 s
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:23:08 [backends.py:148] Compiling a graph for general shape takes 24.27 s
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:23:21 [monitor.py:33] torch.compile takes 30.68 s in total
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:23:21 [monitor.py:33] torch.compile takes 30.70 s in total
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:23:21 [monitor.py:33] torch.compile takes 30.72 s in total
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:23:21 [monitor.py:33] torch.compile takes 30.51 s in total
-INFO 07-09 00:23:22 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
-INFO 07-09 00:23:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
-INFO 07-09 00:23:22 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:23:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:23:22 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:23:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:23:22 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
-INFO 07-09 00:23:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
-(VllmWorker rank=3 pid=530449) INFO 07-09 00:23:50 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB
-(VllmWorker rank=2 pid=530448) INFO 07-09 00:23:50 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB
-(VllmWorker rank=0 pid=530446) INFO 07-09 00:23:50 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB
-(VllmWorker rank=1 pid=530447) INFO 07-09 00:23:50 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.44 GiB
-INFO 07-09 00:23:50 [core.py:159] init engine (profile, create kv cache, warmup model) took 73.30 seconds
-INFO 07-09 00:23:50 [core_client.py:439] Core engine process 0 ready.
-INFO 07-09 00:33:36 [importing.py:53] Triton module has been replaced with a placeholder.
-INFO 07-09 00:33:36 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 17:48:58 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 17:49:06 [config.py:717] This model supports multiple tasks: {'classify', 'generate', 'embed', 'reward', 'score'}. Defaulting to 'generate'.
+INFO 07-09 17:49:06 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 17:49:06 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 17:49:08 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_ties_3', speculative_config=None, tokenizer='./merged_llama/llama_ties_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_ties_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 17:49:08 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 17:49:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_4120125d'), local_subscribe_addr='ipc:///tmp/c72e68dc-ba50-42b1-8a87-dded3d656b8d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 17:49:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6c0e1924'), local_subscribe_addr='ipc:///tmp/0e13ea4c-9e2d-4c63-aed0-b30f9cd27beb', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 17:49:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7752b405'), local_subscribe_addr='ipc:///tmp/663ba509-a245-47aa-9afd-ad1a67ef7dc1', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 17:49:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+WARNING 07-09 17:49:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c09873e2'), local_subscribe_addr='ipc:///tmp/c88ce08e-ba67-42dd-8873-dffb97146532', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a1ea3f66'), local_subscribe_addr='ipc:///tmp/434ba3b1-6ab3-4ced-9419-7d9434f2d61a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:09 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:09 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:09 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:09 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:09 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:09 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:09 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:09 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=921522) WARNING 07-09 17:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=921521) WARNING 07-09 17:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=921519) WARNING 07-09 17:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=921520) WARNING 07-09 17:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_f26be01f'), local_subscribe_addr='ipc:///tmp/d66956fa-30ba-4148-bee7-c3909831c3e4', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:10 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:10 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:10 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:10 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:10 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:10 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=921521) WARNING 07-09 17:49:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=921522) WARNING 07-09 17:49:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:10 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:10 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=921519) WARNING 07-09 17:49:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=921520) WARNING 07-09 17:49:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:10 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_3...
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:10 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_3...
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:10 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_3...
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:10 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_3...
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:24 [loader.py:458] Loading weights took 13.28 seconds
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:24 [loader.py:458] Loading weights took 13.49 seconds
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:24 [loader.py:458] Loading weights took 13.54 seconds
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:24 [loader.py:458] Loading weights took 13.53 seconds
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:24 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.562267 seconds
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:24 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.762330 seconds
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:24 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.765675 seconds
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:24 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.761129 seconds
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:30 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac04124709/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:30 [backends.py:430] Dynamo bytecode transform time: 6.29 s
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:31 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac04124709/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:31 [backends.py:430] Dynamo bytecode transform time: 6.32 s
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:31 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac04124709/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:31 [backends.py:430] Dynamo bytecode transform time: 6.51 s
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:31 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ac04124709/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:31 [backends.py:430] Dynamo bytecode transform time: 6.51 s
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:34 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:34 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:34 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:34 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:49:55 [backends.py:148] Compiling a graph for general shape takes 23.93 s
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:49:55 [backends.py:148] Compiling a graph for general shape takes 23.98 s
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:49:56 [backends.py:148] Compiling a graph for general shape takes 24.26 s
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:49:56 [backends.py:148] Compiling a graph for general shape takes 24.39 s
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:50:09 [monitor.py:33] torch.compile takes 30.89 s in total
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:50:09 [monitor.py:33] torch.compile takes 30.27 s in total
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:50:09 [monitor.py:33] torch.compile takes 30.25 s in total
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:50:09 [monitor.py:33] torch.compile takes 30.77 s in total
+INFO 07-09 17:50:10 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
+INFO 07-09 17:50:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 17:50:10 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 17:50:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 17:50:10 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 17:50:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 17:50:10 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 17:50:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=3 pid=921522) INFO 07-09 17:50:39 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=2 pid=921521) INFO 07-09 17:50:39 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=0 pid=921519) INFO 07-09 17:50:39 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=1 pid=921520) INFO 07-09 17:50:39 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+INFO 07-09 17:50:40 [core.py:159] init engine (profile, create kv cache, warmup model) took 75.35 seconds
+INFO 07-09 17:50:40 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 17:54:35 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 17:54:35 [__init__.py:239] Automatically detected platform cuda.
 | Task              |Version| Metric              |Value |   |Stderr|
 |------------------|------:|---------------------|-----:|---|-----:|
-|all | |math_pass@1:1_samples|0.8667|± |0.0330|
-| | |sem |0.6943|± |0.0225|
-|mm\|arc_challenge\|0| 0|sem |0.9470|± |0.0125|
-|mm\|arc_easy\|0 | 0|sem |0.9677|± |0.0061|
-|mm\|commonsenseqa\|0| 0|sem |0.8304|± |0.0223|
+|all | |math_pass@1:1_samples|0.7451|± |0.0398|
+| | |sem |0.6572|± |0.0255|
+|mm\|arc_challenge\|0| 0|sem |0.8723|± |0.0187|
+|mm\|arc_easy\|0 | 0|sem |0.9461|± |0.0078|
+|mm\|commonsenseqa\|0| 0|sem |0.8127|± |0.0232|
 |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
-|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8822|± |0.0135|
-|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8511|± |0.0525|
-|mm\|truthfulqa\|0 | 0|sem |0.7262|± |0.0489|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7030|± |0.0192|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7872|± |0.0603|
+|mm\|truthfulqa\|0 | 0|sem |0.6548|± |0.0522|
diff --git a/merge_llama/logs/llama_ties_5.log b/merge_llama/logs/llama_ties_5.log
index 9b5d97e80f415aedd1649cb4c100d9bbd8e155f7..bca81a438a2989ee21192f3afd665647b2feacec 100644
--- a/merge_llama/logs/llama_ties_5.log
+++ b/merge_llama/logs/llama_ties_5.log
@@ -1,100 +1,100 @@
-INFO 07-09 00:33:35 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-09 00:33:44 [config.py:717] This model supports multiple tasks: {'embed', 'generate', 'score', 'classify', 'reward'}. Defaulting to 'generate'.
-INFO 07-09 00:33:44 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-09 00:33:44 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-09 00:33:46 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_ties_5', speculative_config=None, tokenizer='./merged2/llama_ties_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_ties_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-09 00:33:46 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-09 00:33:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_36a32a91'), local_subscribe_addr='ipc:///tmp/611c2833-c3c2-4124-811d-59e70c5749af', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:33:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:33:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_775809a8'), local_subscribe_addr='ipc:///tmp/c5c76a32-bbbb-4c38-a1b8-e25de3fb27a1', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:33:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-WARNING 07-09 00:33:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:33:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2eb5f542'), local_subscribe_addr='ipc:///tmp/20dcdcf4-fe50-4e40-9530-4bd114269339', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:33:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:33:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a8806c63'), local_subscribe_addr='ipc:///tmp/bce69914-38fa-4442-9929-573313b661ad', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:33:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8d83c7ae'), local_subscribe_addr='ipc:///tmp/5f012cf6-56a6-4966-87ba-49df9bd6b9db', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:33:58 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:33:58 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:33:58 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:33:58 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:33:59 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:33:59 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:33:59 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:33:59 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=1 pid=537036) WARNING 07-09 00:33:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=537035) WARNING 07-09 00:33:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=3 pid=537038) WARNING 07-09 00:33:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=2 pid=537037) WARNING 07-09 00:33:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:33:59 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_dbf7fd6d'), local_subscribe_addr='ipc:///tmp/f57e3049-2cda-4a2c-909a-c0b86dc47103', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:33:59 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:33:59 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=0 pid=537035) WARNING 07-09 00:33:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:33:59 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:33:59 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:33:59 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:33:59 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
-(VllmWorker rank=1 pid=537036) WARNING 07-09 00:33:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:33:59 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=537037) WARNING 07-09 00:33:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:33:59 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=537038) WARNING 07-09 00:33:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:33:59 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_5...
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:33:59 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_5...
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:33:59 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_5...
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:33:59 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_5...
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:34:15 [loader.py:458] Loading weights took 15.93 seconds
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:34:15 [loader.py:458] Loading weights took 16.04 seconds
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:34:15 [loader.py:458] Loading weights took 16.03 seconds
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:34:15 [loader.py:458] Loading weights took 16.01 seconds
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:34:16 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.287811 seconds
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:34:16 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.289166 seconds
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:34:16 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.209153 seconds
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:34:16 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 16.294447 seconds
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:34:22 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6462abaa9e/rank_3_0 for vLLM's torch.compile
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:34:22 [backends.py:430] Dynamo bytecode transform time: 6.35 s
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:34:22 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6462abaa9e/rank_1_0 for vLLM's torch.compile
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:34:22 [backends.py:430] Dynamo bytecode transform time: 6.39 s
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:34:22 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6462abaa9e/rank_2_0 for vLLM's torch.compile
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:34:22 [backends.py:430] Dynamo bytecode transform time: 6.39 s
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:34:22 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/6462abaa9e/rank_0_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:34:22 [backends.py:430] Dynamo bytecode transform time: 6.45 s
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:34:25 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:34:26 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:34:26 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:34:26 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:34:47 [backends.py:148] Compiling a graph for general shape takes 24.29 s
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:34:47 [backends.py:148] Compiling a graph for general shape takes 24.35 s
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:34:47 [backends.py:148] Compiling a graph for general shape takes 24.39 s
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:34:48 [backends.py:148] Compiling a graph for general shape takes 24.70 s
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:35:01 [monitor.py:33] torch.compile takes 30.74 s in total
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:35:01 [monitor.py:33] torch.compile takes 30.64 s in total
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:35:01 [monitor.py:33] torch.compile takes 30.78 s in total
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:35:01 [monitor.py:33] torch.compile takes 31.15 s in total
-INFO 07-09 00:35:02 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
-INFO 07-09 00:35:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
-INFO 07-09 00:35:02 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:35:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:35:02 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:35:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:35:02 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
-INFO 07-09 00:35:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
-(VllmWorker rank=2 pid=537037) INFO 07-09 00:35:42 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-(VllmWorker rank=1 pid=537036) INFO 07-09 00:35:42 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-(VllmWorker rank=3 pid=537038) INFO 07-09 00:35:42 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-(VllmWorker rank=0 pid=537035) INFO 07-09 00:35:42 [gpu_model_runner.py:1686] Graph capturing finished in 40 secs, took 2.44 GiB
-INFO 07-09 00:35:42 [core.py:159] init engine (profile, create kv cache, warmup model) took 86.64 seconds
-INFO 07-09 00:35:43 [core_client.py:439] Core engine process 0 ready.
-INFO 07-09 00:45:40 [importing.py:53] Triton module has been replaced with a placeholder.
-INFO 07-09 00:45:40 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 17:54:34 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 17:54:43 [config.py:717] This model supports multiple tasks: {'embed', 'reward', 'classify', 'generate', 'score'}. Defaulting to 'generate'.
+INFO 07-09 17:54:43 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 17:54:43 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 17:54:44 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_ties_5', speculative_config=None, tokenizer='./merged_llama/llama_ties_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_ties_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 17:54:44 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 17:54:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_76273d6b'), local_subscribe_addr='ipc:///tmp/b619bdf6-5864-4463-9cf0-0340ee0af274', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 17:54:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:54:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b971d566'), local_subscribe_addr='ipc:///tmp/d759c481-c7f5-4df6-b220-058e4d8137e9', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 17:54:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+WARNING 07-09 17:54:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+WARNING 07-09 17:54:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in 
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:54:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d5c234a6'), local_subscribe_addr='ipc:///tmp/5c7ef965-b710-439b-b955-d4faea567e3a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:54:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ba4a239a'), local_subscribe_addr='ipc:///tmp/07472cf9-7fe8-454a-bd59-c36247403c7a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:54:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b17010d9'), local_subscribe_addr='ipc:///tmp/a4343c87-259d-4470-9536-8fe21b9a02ff', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:54:46 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:54:46 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:54:46 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:54:46 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:54:46 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:54:46 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:54:46 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:54:46 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=926016) WARNING 07-09 17:54:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=926015) WARNING 07-09 17:54:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=926013) WARNING 07-09 17:54:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=926014) WARNING 07-09 17:54:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:54:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_1f48bea8'), local_subscribe_addr='ipc:///tmp/b409e1dc-02a4-45fd-8af1-97d936278762', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:54:46 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:54:46 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:54:46 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:54:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:54:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=926016) WARNING 07-09 17:54:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=926015) WARNING 07-09 17:54:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:54:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=926014) WARNING 07-09 17:54:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:54:47 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_5...
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:54:47 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_5...
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:54:47 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_5...
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:54:47 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:54:47 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=926013) WARNING 07-09 17:54:47 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:54:47 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_5...
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:55:01 [loader.py:458] Loading weights took 13.90 seconds
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:55:01 [loader.py:458] Loading weights took 14.04 seconds
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:55:01 [loader.py:458] Loading weights took 14.04 seconds
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:55:01 [loader.py:458] Loading weights took 13.98 seconds
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:55:01 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.180173 seconds
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:55:01 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.281678 seconds
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:55:01 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.281836 seconds
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:55:01 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.288620 seconds
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:55:07 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7a0c27311b/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:55:07 [backends.py:430] Dynamo bytecode transform time: 6.34 s
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:55:08 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7a0c27311b/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:55:08 [backends.py:430] Dynamo bytecode transform time: 6.51 s
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:55:08 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7a0c27311b/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:55:08 [backends.py:430] Dynamo bytecode transform time: 6.52 s
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:55:08 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/7a0c27311b/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:55:08 [backends.py:430] Dynamo bytecode transform time: 6.57 s
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:55:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:55:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:55:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:55:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:55:32 [backends.py:148] Compiling a graph for general shape takes 24.14 s
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:55:33 [backends.py:148] Compiling a graph for general shape takes 24.19 s
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:55:33 [backends.py:148] Compiling a graph for general shape takes 24.49 s
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:55:33 [backends.py:148] Compiling a graph for general shape takes 24.33 s
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:55:46 [monitor.py:33] torch.compile takes 30.90 s in total
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:55:46 [monitor.py:33] torch.compile takes 30.70 s in total
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:55:46 [monitor.py:33] torch.compile takes 30.48 s in total
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:55:46 [monitor.py:33] torch.compile takes 31.01 s in total
+INFO 07-09 17:55:47 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
+INFO 07-09 17:55:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 17:55:47 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 17:55:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 17:55:47 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 17:55:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 17:55:47 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 17:55:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=2 pid=926015) INFO 07-09 17:56:16 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=3 pid=926016) INFO 07-09 17:56:16 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=0 pid=926013) INFO 07-09 17:56:16 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+(VllmWorker rank=1 pid=926014) INFO 07-09 17:56:16 [gpu_model_runner.py:1686] Graph capturing finished in 29 secs, took 2.44 GiB
+INFO 07-09 17:56:16 [core.py:159] init engine (profile, create kv cache, warmup model) took 74.93 seconds
+INFO 07-09 17:56:16 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 18:00:19 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 18:00:19 [__init__.py:239] Automatically detected platform cuda.
 | Task              |Version| Metric              |Value |   |Stderr|
 |------------------|------:|---------------------|-----:|---|-----:|
-|all | |math_pass@1:1_samples|0.9411|± |0.0068|
-| | |sem |0.6766|± |0.0238|
-|mm\|arc_challenge\|0| 0|sem |0.9346|± |0.0138|
-|mm\|arc_easy\|0 | 0|sem |0.9641|± |0.0064|
-|mm\|commonsenseqa\|0| 0|sem |0.8057|± |0.0236|
+|all | |math_pass@1:1_samples|0.8062|± |0.0322|
+| | |sem |0.6749|± |0.0243|
+|mm\|arc_challenge\|0| 0|sem |0.9097|± |0.0160|
+|mm\|arc_easy\|0 | 0|sem |0.9425|± |0.0081|
+|mm\|commonsenseqa\|0| 0|sem |0.8198|± |0.0229|
 |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
-|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.8822|± |0.0135|
-|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
-|mm\|truthfulqa\|0 | 0|sem |0.6786|± |0.0513|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7188|± |0.0189|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8936|± |0.0455|
+|mm\|truthfulqa\|0 | 0|sem |0.7024|± |0.0502|
diff --git a/merge_llama/logs/llama_ties_7.log b/merge_llama/logs/llama_ties_7.log
index 09384d43f1356bfc4df7dcc1105ef3ddf0c0f272..05c4956f2ba7d345fc32c0bc880b16422015f438 100644
--- a/merge_llama/logs/llama_ties_7.log
+++ b/merge_llama/logs/llama_ties_7.log
@@ -1,100 +1,100 @@
-INFO 07-09 00:45:39 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-09 00:45:47 [config.py:717] This model supports multiple tasks: {'classify', 'generate', 'score', 'reward', 'embed'}. Defaulting to 'generate'.
-INFO 07-09 00:45:47 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-09 00:45:47 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-09 00:45:49 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_ties_7', speculative_config=None, tokenizer='./merged2/llama_ties_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_ties_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-09 00:45:49 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-09 00:45:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_a3cf4d2f'), local_subscribe_addr='ipc:///tmp/72e71256-af18-4947-8269-b7b77083388d', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:45:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:45:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ea367d82'), local_subscribe_addr='ipc:///tmp/deec4ca9-a303-416c-bcc7-6f9047f2ac26', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:45:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:45:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_65989810'), local_subscribe_addr='ipc:///tmp/5fd19ca0-a9b1-4376-99ac-bb6eeba7103c', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:45:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:45:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5f38be70'), local_subscribe_addr='ipc:///tmp/e89dad31-1af2-4f0f-a1f1-5a27472377ca', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:45:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:45:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_97b5bd5a'), local_subscribe_addr='ipc:///tmp/13a792b8-9200-4c09-b7cd-aa12eb18ebb7', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:45:52 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:45:52 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:45:52 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:45:52 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:45:52 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:45:52 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:45:52 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:45:52 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=543607) WARNING 07-09 00:45:52 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=3 pid=543608) WARNING 07-09 00:45:52 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=1 pid=543606) WARNING 07-09 00:45:52 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=543605) WARNING 07-09 00:45:52 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:45:52 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_7e24baf1'), local_subscribe_addr='ipc:///tmp/0afd1442-b4f3-418e-b183-eb7559030c2c', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:45:52 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:45:52 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:45:52 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:45:52 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:45:52 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:45:52 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=543608) WARNING 07-09 00:45:52 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=2 pid=543607) WARNING 07-09 00:45:52 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:45:52 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:45:52 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=0 pid=543605) WARNING 07-09 00:45:52 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=1 pid=543606) WARNING 07-09 00:45:52 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:45:52 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_7...
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:45:52 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_7...
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:45:52 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_7...
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:45:52 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_7...
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:46:06 [loader.py:458] Loading weights took 13.68 seconds
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:46:06 [loader.py:458] Loading weights took 13.74 seconds
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:46:06 [loader.py:458] Loading weights took 13.68 seconds
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:46:06 [loader.py:458] Loading weights took 13.70 seconds
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:46:06 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.896493 seconds
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:46:06 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.965019 seconds
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:46:06 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.965931 seconds
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:46:06 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 13.959341 seconds
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:46:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1243c53fbf/rank_2_0 for vLLM's torch.compile
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:46:13 [backends.py:430] Dynamo bytecode transform time: 6.50 s
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:46:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1243c53fbf/rank_3_0 for vLLM's torch.compile
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:46:13 [backends.py:430] Dynamo bytecode transform time: 6.50 s
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:46:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1243c53fbf/rank_1_0 for vLLM's torch.compile
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:46:13 [backends.py:430] Dynamo bytecode transform time: 6.54 s
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:46:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/1243c53fbf/rank_0_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:46:13 [backends.py:430] Dynamo bytecode transform time: 6.62 s
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:46:16 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:46:16 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:46:16 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:46:17 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:46:38 [backends.py:148] Compiling a graph for general shape takes 24.39 s
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:46:38 [backends.py:148] Compiling a graph for general shape takes 24.77 s
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:46:39 [backends.py:148] Compiling a graph for general shape takes 24.88 s
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:46:39 [backends.py:148] Compiling a graph for general shape takes 25.05 s
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:46:52 [monitor.py:33] torch.compile takes 31.38 s in total
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:46:52 [monitor.py:33] torch.compile takes 31.59 s in total
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:46:52 [monitor.py:33] torch.compile takes 31.01 s in total
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:46:52 [monitor.py:33] torch.compile takes 31.28 s in total
-INFO 07-09 00:46:53 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
-INFO 07-09 00:46:53 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
-INFO 07-09 00:46:53 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:46:53 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:46:53 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:46:53 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:46:53 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
-INFO 07-09 00:46:53 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
-(VllmWorker rank=3 pid=543608) INFO 07-09 00:47:23 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-(VllmWorker rank=1 pid=543606) INFO 07-09 00:47:23 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-(VllmWorker rank=2 pid=543607) INFO 07-09 00:47:23 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-(VllmWorker rank=0 pid=543605) INFO 07-09 00:47:23 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-INFO 07-09 00:47:23 [core.py:159] init engine (profile, create kv cache, warmup model) took 76.72 seconds
-INFO 07-09 00:47:24 [core_client.py:439] Core engine process 0 ready.
-INFO 07-09 00:58:41 [importing.py:53] Triton module has been replaced with a placeholder.
-INFO 07-09 00:58:41 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 18:00:18 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 18:00:27 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'classify', 'generate', 'reward'}. Defaulting to 'generate'.
+INFO 07-09 18:00:27 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 18:00:27 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 18:00:28 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_ties_7', speculative_config=None, tokenizer='./merged_llama/llama_ties_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_ties_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 18:00:28 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 18:00:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_cce1c565'), local_subscribe_addr='ipc:///tmp/53e654fe-bd26-4d00-850a-7e1feb3e1744', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 18:00:29 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a5e521ae'), local_subscribe_addr='ipc:///tmp/c8d4473d-db79-4a10-a47c-c48c45f35af0', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 18:00:29 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e978a0c6'), local_subscribe_addr='ipc:///tmp/80fd9206-7aee-4cb8-a868-9cb989ab2477', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 18:00:29 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 18:00:29 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2003116e'), local_subscribe_addr='ipc:///tmp/6969bf4d-e7f6-433d-a849-a34fe9fd3f73', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_eaa6ffdd'), local_subscribe_addr='ipc:///tmp/2fe802f9-66af-4347-831b-5c8e37d6e427', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=930563) WARNING 07-09 18:00:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=930562) WARNING 07-09 18:00:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=930560) WARNING 07-09 18:00:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=930561) WARNING 07-09 18:00:31 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:31 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_39532c5c'), local_subscribe_addr='ipc:///tmp/cb17b1a6-d4a8-490a-baef-a109173961d6', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:31 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:31 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:31 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:31 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=930562) WARNING 07-09 18:00:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=930563) WARNING 07-09 18:00:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=930560) WARNING 07-09 18:00:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=930561) WARNING 07-09 18:00:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:31 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_7...
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:31 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_7...
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:31 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_7...
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:31 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_7...
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:45 [loader.py:458] Loading weights took 13.83 seconds
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:45 [loader.py:458] Loading weights took 13.91 seconds
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:45 [loader.py:458] Loading weights took 13.87 seconds
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:45 [loader.py:458] Loading weights took 13.87 seconds
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:45 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.047670 seconds
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:45 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.134940 seconds
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:45 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.133397 seconds
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:45 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 14.133280 seconds
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:51 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0dd8fc0d38/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:51 [backends.py:430] Dynamo bytecode transform time: 6.32 s
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:52 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0dd8fc0d38/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:52 [backends.py:430] Dynamo bytecode transform time: 6.43 s
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:52 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0dd8fc0d38/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:52 [backends.py:430] Dynamo bytecode transform time: 6.44 s
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:52 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0dd8fc0d38/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:52 [backends.py:430] Dynamo bytecode transform time: 6.49 s
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:00:55 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:00:55 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:00:55 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:00:55 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:01:17 [backends.py:148] Compiling a graph for general shape takes 24.46 s
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:01:17 [backends.py:148] Compiling a graph for general shape takes 24.34 s
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:01:17 [backends.py:148] Compiling a graph for general shape takes 24.76 s
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:01:18 [backends.py:148] Compiling a graph for general shape takes 25.20 s
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:01:31 [monitor.py:33] torch.compile takes 30.78 s in total
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:01:31 [monitor.py:33] torch.compile takes 31.69 s in total
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:01:31 [monitor.py:33] torch.compile takes 30.77 s in total
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:01:31 [monitor.py:33] torch.compile takes 31.19 s in total
+INFO 07-09 18:01:32 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
+INFO 07-09 18:01:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
+INFO 07-09 18:01:32 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 18:01:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 18:01:32 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
+INFO 07-09 18:01:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
+INFO 07-09 18:01:32 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
+INFO 07-09 18:01:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
+(VllmWorker rank=2 pid=930562) INFO 07-09 18:02:05 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB
+(VllmWorker rank=3 pid=930563) INFO 07-09 18:02:06 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB
+(VllmWorker rank=1 pid=930561) INFO 07-09 18:02:06 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB
+(VllmWorker rank=0 pid=930560) INFO 07-09 18:02:06 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 2.44 GiB
+INFO 07-09 18:02:06 [core.py:159] init engine (profile, create kv cache, warmup model) took 80.59 seconds
+INFO 07-09 18:02:06 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 18:06:19 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 18:06:19 [__init__.py:239] Automatically detected platform cuda.
 | Task |Version| Metric |Value | |Stderr|
 |------------------|------:|---------------------|-----:|---|-----:|
-|all | |math_pass@1:1_samples|0.5137|± |0.0473|
-| | |sem |0.3423|± |0.0317|
-|mm\|arc_challenge\|0| 0|sem |0.4548|± |0.0278|
-|mm\|arc_easy\|0 | 0|sem |0.4982|± |0.0173|
-|mm\|commonsenseqa\|0| 0|sem |0.4134|± |0.0293|
+|all | |math_pass@1:1_samples|0.8655|± |0.0197|
+| | |sem |0.6794|± |0.0240|
+|mm\|arc_challenge\|0| 0|sem |0.9003|± |0.0167|
+|mm\|arc_easy\|0 | 0|sem |0.9341|± |0.0086|
+|mm\|commonsenseqa\|0| 0|sem |0.8127|± |0.0232|
 |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
-|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.5167|± |0.0210|
-|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.5106|± |0.0737|
-|mm\|truthfulqa\|0 | 0|sem |0.3452|± |0.0522|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7522|± |0.0181|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9787|± |0.0213|
+|mm\|truthfulqa\|0 | 0|sem |0.7500|± |0.0475|
diff --git a/merge_llama/logs/llama_ties_9.log b/merge_llama/logs/llama_ties_9.log
index c0988818450967b3cdd2f93eada2dc94a0b94a26..82efe817bbc6516174e48991474df66ae8726270 100644
--- a/merge_llama/logs/llama_ties_9.log
+++ b/merge_llama/logs/llama_ties_9.log
@@ -1,100 +1,100 @@
-INFO 07-09 00:58:40 [__init__.py:239] Automatically detected platform cuda.
-INFO 07-09 00:58:49 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'reward', 'embed', 'classify'}. Defaulting to 'generate'.
-INFO 07-09 00:58:49 [config.py:1770] Defaulting to use mp for distributed inference
-INFO 07-09 00:58:49 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
-INFO 07-09 00:58:50 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged2/llama_ties_9', speculative_config=None, tokenizer='./merged2/llama_ties_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged2/llama_ties_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
-WARNING 07-09 00:58:50 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
-INFO 07-09 00:58:50 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_ff185e1e'), local_subscribe_addr='ipc:///tmp/11203ebb-bc3a-42fb-955b-82a28a9ed260', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:58:51 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-WARNING 07-09 00:58:51 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:58:51 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_aa588064'), local_subscribe_addr='ipc:///tmp/630e5164-739b-4042-9439-e10ea086226f', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:58:51 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f7b24a90'), local_subscribe_addr='ipc:///tmp/83153c54-81dd-44fe-b96e-7c78e4cdb5b8', remote_subscribe_addr=None, remote_addr_ipv6=False)
-WARNING 07-09 00:58:51 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-WARNING 07-09 00:58:51 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:58:51 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d6e4736e'), local_subscribe_addr='ipc:///tmp/4619752f-b97d-4c16-9b5b-460535d6fdaf', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:58:51 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_999aa29e'), local_subscribe_addr='ipc:///tmp/b3b77c9d-6a6b-48d4-8159-037fb77d3bdc', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:58:53 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:58:53 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:58:53 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:58:53 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:58:53 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:58:53 [utils.py:1055] Found nccl from library libnccl.so.2
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:58:53 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:58:53 [pynccl.py:69] vLLM is using nccl==2.21.5
-(VllmWorker rank=2 pid=549670) WARNING 07-09 00:58:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=3 pid=549671) WARNING 07-09 00:58:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=549668) WARNING 07-09 00:58:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=1 pid=549669) WARNING 07-09 00:58:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:58:53 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_cd06b9c8'), local_subscribe_addr='ipc:///tmp/c00bc0aa-6e41-418a-9145-628414f1e3ca', remote_subscribe_addr=None, remote_addr_ipv6=False)
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:58:53 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:58:53 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:58:53 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:58:53 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:58:53 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:58:53 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=2 pid=549670) WARNING 07-09 00:58:53 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=3 pid=549671) WARNING 07-09 00:58:53 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:58:53 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=1 pid=549669) WARNING 07-09 00:58:54 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:58:54 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_9...
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:58:54 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_9...
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:58:54 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_9...
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:58:54 [cuda.py:221] Using Flash Attention backend on V1 engine.
-(VllmWorker rank=0 pid=549668) WARNING 07-09 00:58:54 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:58:54 [gpu_model_runner.py:1329] Starting to load model ./merged2/llama_ties_9...
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:59:03 [loader.py:458] Loading weights took 9.59 seconds
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:59:03 [loader.py:458] Loading weights took 9.71 seconds
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:59:03 [loader.py:458] Loading weights took 9.76 seconds
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:59:03 [loader.py:458] Loading weights took 9.71 seconds
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:59:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 9.822053 seconds
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:59:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 9.988516 seconds
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:59:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 9.984207 seconds
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:59:04 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 9.964846 seconds
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:59:10 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a4cc4bdb58/rank_3_0 for vLLM's torch.compile
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:59:10 [backends.py:430] Dynamo bytecode transform time: 6.39 s
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:59:10 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a4cc4bdb58/rank_2_0 for vLLM's torch.compile
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:59:10 [backends.py:430] Dynamo bytecode transform time: 6.48 s
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:59:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a4cc4bdb58/rank_1_0 for vLLM's torch.compile
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:59:11 [backends.py:430] Dynamo bytecode transform time: 6.54 s
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:59:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/a4cc4bdb58/rank_0_0 for vLLM's torch.compile
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:59:11 [backends.py:430] Dynamo bytecode transform time: 6.61 s
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:59:14 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:59:14 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:59:14 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:59:14 [backends.py:136] Cache the graph of shape None for later use
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:59:36 [backends.py:148] Compiling a graph for general shape takes 24.35 s
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:59:36 [backends.py:148] Compiling a graph for general shape takes 24.72 s
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:59:36 [backends.py:148] Compiling a graph for general shape takes 24.70 s
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:59:36 [backends.py:148] Compiling a graph for general shape takes 25.05 s
-(VllmWorker rank=0 pid=549668) INFO 07-09 00:59:49 [monitor.py:33] torch.compile takes 30.97 s in total
-(VllmWorker rank=1 pid=549669) INFO 07-09 00:59:49 [monitor.py:33] torch.compile takes 31.59 s in total
-(VllmWorker rank=3 pid=549671) INFO 07-09 00:59:49 [monitor.py:33] torch.compile takes 31.11 s in total
-(VllmWorker rank=2 pid=549670) INFO 07-09 00:59:49 [monitor.py:33] torch.compile takes 31.17 s in total
-INFO 07-09 00:59:51 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens
-INFO 07-09 00:59:51 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x
-INFO 07-09 00:59:51 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:59:51 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:59:51 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens
-INFO 07-09 00:59:51 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x
-INFO 07-09 00:59:51 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens
-INFO 07-09 00:59:51 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x
-(VllmWorker rank=3 pid=549671) INFO 07-09 01:00:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-(VllmWorker rank=2 pid=549670) INFO 07-09 01:00:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-(VllmWorker rank=1 pid=549669) INFO 07-09 01:00:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-(VllmWorker rank=0 pid=549668) INFO 07-09 01:00:20 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.44 GiB
-INFO 07-09 01:00:20 [core.py:159] init engine (profile, create kv cache, warmup model) took 76.33 seconds
-INFO 07-09 01:00:21 [core_client.py:439] Core engine process 0 ready.
-INFO 07-09 01:11:41 [importing.py:53] Triton module has been replaced with a placeholder.
-INFO 07-09 01:11:41 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 18:06:18 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 18:06:27 [config.py:717] This model supports multiple tasks: {'classify', 'embed', 'generate', 'reward', 'score'}. Defaulting to 'generate'.
+INFO 07-09 18:06:28 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 18:06:28 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 18:06:29 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_llama/llama_ties_9', speculative_config=None, tokenizer='./merged_llama/llama_ties_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_llama/llama_ties_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 18:06:29 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 18:06:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_d907a448'), local_subscribe_addr='ipc:///tmp/a4c6db4a-1076-493b-87fb-4736e61d8b13', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 18:06:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d3ac681e'), local_subscribe_addr='ipc:///tmp/f79cbd99-cde7-4448-b2ab-aefff45832e9', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 18:06:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a1580e93'), local_subscribe_addr='ipc:///tmp/f1979477-ca0b-408c-b99b-78fb3600c7c4', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 18:06:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 18:06:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_90ee0dd1'), local_subscribe_addr='ipc:///tmp/8bad26fd-882c-40d4-90c0-1b5f4836bdb8', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1348937d'), local_subscribe_addr='ipc:///tmp/0e604c4b-4207-4e51-831a-3ddd86efc94b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=935220) WARNING 07-09 18:06:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=935219) WARNING 07-09 18:06:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=935222) WARNING 07-09 18:06:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=935221) WARNING 07-09 18:06:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_e8896114'), local_subscribe_addr='ipc:///tmp/fbc7ad1e-9351-4d8f-a164-3a83c1dcf8a7', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:32 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:32 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:32 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:32 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=935220) WARNING 07-09 18:06:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=935219) WARNING 07-09 18:06:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=935222) WARNING 07-09 18:06:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=935221) WARNING 07-09 18:06:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:32 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_9...
+(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:32 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_9...
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:32 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_9...
+(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:32 [gpu_model_runner.py:1329] Starting to load model ./merged_llama/llama_ties_9...
+(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:47 [loader.py:458] Loading weights took 15.40 seconds +(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:48 [loader.py:458] Loading weights took 15.54 seconds +(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:48 [loader.py:458] Loading weights took 15.53 seconds +(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:48 [loader.py:458] Loading weights took 15.54 seconds +(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.658867 seconds +(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.803430 seconds +(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.803629 seconds +(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:48 [gpu_model_runner.py:1347] Model loading took 3.7711 GiB and 15.799964 seconds +(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:54 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8c90116ffc/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:54 [backends.py:430] Dynamo bytecode transform time: 6.50 s +(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:54 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8c90116ffc/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:54 [backends.py:430] Dynamo bytecode transform time: 6.50 s +(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:55 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8c90116ffc/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:55 [backends.py:430] Dynamo bytecode transform time: 6.58 s +(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:55 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8c90116ffc/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:55 [backends.py:430] Dynamo bytecode transform time: 6.61 s +(VllmWorker rank=3 pid=935222) INFO 07-09 18:06:58 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=935221) INFO 07-09 18:06:58 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=935219) INFO 07-09 18:06:58 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=935220) INFO 07-09 18:06:58 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=935222) INFO 07-09 18:07:20 [backends.py:148] Compiling a graph for general shape takes 25.01 s +(VllmWorker rank=0 pid=935219) INFO 07-09 18:07:20 [backends.py:148] Compiling a graph for general shape takes 25.18 s +(VllmWorker rank=2 pid=935221) INFO 07-09 18:07:20 [backends.py:148] Compiling a graph for general shape takes 25.30 s +(VllmWorker rank=1 pid=935220) INFO 07-09 18:07:21 [backends.py:148] Compiling a graph for general shape takes 25.35 s +(VllmWorker rank=1 pid=935220) INFO 07-09 18:07:34 [monitor.py:33] torch.compile takes 31.97 s in total +(VllmWorker rank=0 pid=935219) INFO 07-09 18:07:34 [monitor.py:33] torch.compile takes 31.76 s in total +(VllmWorker rank=2 pid=935221) INFO 07-09 18:07:34 [monitor.py:33] torch.compile takes 31.80 s in total +(VllmWorker rank=3 pid=935222) INFO 07-09 18:07:34 [monitor.py:33] torch.compile takes 31.51 s in total +INFO 07-09 18:07:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,880 tokens +INFO 
07-09 18:07:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.66x +INFO 07-09 18:07:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 18:07:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 18:07:35 [kv_cache_utils.py:634] GPU KV cache size: 2,028,624 tokens +INFO 07-09 18:07:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 990.54x +INFO 07-09 18:07:35 [kv_cache_utils.py:634] GPU KV cache size: 2,029,904 tokens +INFO 07-09 18:07:35 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 991.16x +(VllmWorker rank=1 pid=935220) INFO 07-09 18:08:13 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 2.44 GiB +(VllmWorker rank=2 pid=935221) INFO 07-09 18:08:13 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 2.44 GiB +(VllmWorker rank=0 pid=935219) INFO 07-09 18:08:13 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 2.44 GiB +(VllmWorker rank=3 pid=935222) INFO 07-09 18:08:14 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 2.44 GiB +INFO 07-09 18:08:14 [core.py:159] init engine (profile, create kv cache, warmup model) took 85.82 seconds +INFO 07-09 18:08:14 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 18:12:23 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 18:12:23 [__init__.py:239] Automatically detected platform cuda. | Task |Version| Metric |Value | |Stderr| |------------------|------:|---------------------|-----:|---|-----:| -|all | |math_pass@1:1_samples|0.5278|± |0.0473| -| | |sem |0.4440|± |0.0321| -|mm\|arc_challenge\|0| 0|sem |0.5732|± |0.0276| -|mm\|arc_easy\|0 | 0|sem |0.6323|± |0.0167| -|mm\|commonsenseqa\|0| 0|sem |0.5618|± |0.0295| +|all | |math_pass@1:1_samples|0.8336|± |0.0273| +| | |sem |0.6739|± |0.0245| +|mm\|arc_challenge\|0| 0|sem |0.8879|± |0.0176| +|mm\|arc_easy\|0 | 0|sem |0.9461|± |0.0078| +|mm\|commonsenseqa\|0| 0|sem |0.8092|± |0.0234| |mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | -|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.5237|± |0.0210| -|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.5319|± |0.0736| -|mm\|truthfulqa\|0 | 0|sem |0.4524|± |0.0546| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7311|± |0.0186| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9362|± |0.0360| +|mm\|truthfulqa\|0 | 0|sem |0.7262|± |0.0489| diff --git a/merge_llama/logs/show_results.log b/merge_llama/logs/show_results.log index 2b215b70ddc6dbea85c48e21e6d023dc15668359..3d6138de11cc84736cde6bb0e13388b2ca633cb6 100644 --- a/merge_llama/logs/show_results.log +++ b/merge_llama/logs/show_results.log @@ -1,108 +1,137 @@ -| Task |Version| Metric | Model |Value | |Stderr| -|------------------|------:|---------------------|-----------------------------|-----:|---|-----:| -|mm\|arc_challenge\|0| 0|sem |._merged2_llama_dare_linear_1|0.0000|± |0.0000| -| | | |._merged2_llama_dare_linear_3|0.0000|± |0.0000| -| | | |._merged2_llama_dare_linear_5|0.0000|± |0.0000| -| | | |._merged2_llama_dare_linear_7|0.0000|± |0.0000| -| | | |._merged2_llama_dare_linear_9|0.0000|± |0.0000| -| | | |._merged2_llama_linear_1 |0.9502|± |0.0122| -| | | |._merged2_llama_linear_3 |0.9377|± |0.0135| -| | | |._merged2_llama_linear_5 |0.9346|± |0.0138| -| | | |._merged2_llama_linear_7 |0.9065|± |0.0163| -| | | |._merged2_llama_linear_9 |0.8006|± |0.0223| -| | | |._merged2_llama_ties_1 |0.9190|± |0.0153| -| | | |._merged2_llama_ties_3 |0.9470|± |0.0125| -| | | 
-| | | |._merged2_llama_ties_7 |0.4548|± |0.0278|
-| | | |._merged2_llama_ties_9 |0.5732|± |0.0276|
-|mm\|arc_easy\|0 | 0|sem |._merged2_llama_dare_linear_1|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_3|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_5|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_7|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_9|0.0000|± |0.0000|
-| | | |._merged2_llama_linear_1 |0.9737|± |0.0055|
-| | | |._merged2_llama_linear_3 |0.9820|± |0.0046|
-| | | |._merged2_llama_linear_5 |0.9749|± |0.0054|
-| | | |._merged2_llama_linear_7 |0.9653|± |0.0063|
-| | | |._merged2_llama_linear_9 |0.8587|± |0.0121|
-| | | |._merged2_llama_ties_1 |0.9677|± |0.0061|
-| | | |._merged2_llama_ties_3 |0.9677|± |0.0061|
-| | | |._merged2_llama_ties_5 |0.9641|± |0.0064|
-| | | |._merged2_llama_ties_7 |0.4982|± |0.0173|
-| | | |._merged2_llama_ties_9 |0.6323|± |0.0167|
-|mm\|commonsenseqa\|0| 0|sem |._merged2_llama_dare_linear_1|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_3|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_5|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_7|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_9|0.0000|± |0.0000|
-| | | |._merged2_llama_linear_1 |0.8799|± |0.0194|
-| | | |._merged2_llama_linear_3 |0.8587|± |0.0207|
-| | | |._merged2_llama_linear_5 |0.8516|± |0.0212|
-| | | |._merged2_llama_linear_7 |0.7880|± |0.0243|
-| | | |._merged2_llama_linear_9 |0.7244|± |0.0266|
-| | | |._merged2_llama_ties_1 |0.8198|± |0.0229|
-| | | |._merged2_llama_ties_3 |0.8304|± |0.0223|
-| | | |._merged2_llama_ties_5 |0.8057|± |0.0236|
-| | | |._merged2_llama_ties_7 |0.4134|± |0.0293|
-| | | |._merged2_llama_ties_9 |0.5618|± |0.0295|
-|mm\|gpqa_diamond\|0 | 2|sem |._merged2_llama_dare_linear_1|0.0000| | |
-| | | |._merged2_llama_dare_linear_3|0.0000| | |
-| | | |._merged2_llama_dare_linear_5|0.0000| | |
-| | | |._merged2_llama_dare_linear_7|0.0000| | |
-| | | |._merged2_llama_dare_linear_9|0.0000| | |
-| | | |._merged2_llama_linear_1 |0.0000| | |
-| | | |._merged2_llama_linear_3 |0.0000| | |
-| | | |._merged2_llama_linear_5 |0.0000| | |
-| | | |._merged2_llama_linear_7 |0.0000| | |
-| | | |._merged2_llama_linear_9 |0.0000| | |
-| | | |._merged2_llama_ties_1 |0.0000| | |
-| | | |._merged2_llama_ties_3 |0.0000| | |
-| | | |._merged2_llama_ties_5 |0.0000| | |
-| | | |._merged2_llama_ties_7 |0.0000| | |
-| | | |._merged2_llama_ties_9 |0.0000| | |
-|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|._merged2_llama_dare_linear_1|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_3|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_5|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_7|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_9|0.0000|± |0.0000|
-| | | |._merged2_llama_linear_1 |0.8699|± |0.0141|
-| | | |._merged2_llama_linear_3 |0.8594|± |0.0146|
-| | | |._merged2_llama_linear_5 |0.6801|± |0.0196|
-| | | |._merged2_llama_linear_7 |0.8014|± |0.0167|
-| | | |._merged2_llama_linear_9 |0.6626|± |0.0198|
-| | | |._merged2_llama_ties_1 |0.7996|± |0.0168|
-| | | |._merged2_llama_ties_3 |0.8822|± |0.0135|
-| | | |._merged2_llama_ties_5 |0.8822|± |0.0135|
-| | | |._merged2_llama_ties_7 |0.5167|± |0.0210|
-| | | |._merged2_llama_ties_9 |0.5237|± |0.0210|
-|mm\|math_500\|0 | 3|math_pass@1:1_samples|._merged2_llama_dare_linear_1|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_3|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_5|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_7|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_9|0.0000|± |0.0000|
-| | | |._merged2_llama_linear_1 |0.8723|± |0.0492|
-| | | |._merged2_llama_linear_3 |0.8936|± |0.0455|
-| | | |._merged2_llama_linear_5 |0.7660|± |0.0624|
-| | | |._merged2_llama_linear_7 |0.9362|± |0.0360|
-| | | |._merged2_llama_linear_9 |0.5957|± |0.0724|
-| | | |._merged2_llama_ties_1 |0.7021|± |0.0674|
-| | | |._merged2_llama_ties_3 |0.8511|± |0.0525|
-| | | |._merged2_llama_ties_5 |1.0000|± |0.0000|
-| | | |._merged2_llama_ties_7 |0.5106|± |0.0737|
-| | | |._merged2_llama_ties_9 |0.5319|± |0.0736|
-|mm\|truthfulqa\|0 | 0|sem |._merged2_llama_dare_linear_1|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_3|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_5|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_7|0.0000|± |0.0000|
-| | | |._merged2_llama_dare_linear_9|0.0000|± |0.0000|
-| | | |._merged2_llama_linear_1 |0.7381|± |0.0483|
-| | | |._merged2_llama_linear_3 |0.7619|± |0.0468|
-| | | |._merged2_llama_linear_5 |0.7500|± |0.0475|
-| | | |._merged2_llama_linear_7 |0.7024|± |0.0502|
-| | | |._merged2_llama_linear_9 |0.6310|± |0.0530|
-| | | |._merged2_llama_ties_1 |0.7143|± |0.0496|
-| | | |._merged2_llama_ties_3 |0.7262|± |0.0489|
-| | | |._merged2_llama_ties_5 |0.6786|± |0.0513|
-| | | |._merged2_llama_ties_7 |0.3452|± |0.0522|
-| | | |._merged2_llama_ties_9 |0.4524|± |0.0546|
+| Task |Version| Metric | Model |Value | |Stderr|
+|--------------------|------:|---------------------|----------------------------------|-----:|---|-----:|
+|mm\|arc_challenge_c\|0| 0|em |._models_Llama3-8B |0.8723|± |0.0187|
+| | |pem |._models_Llama3-8B |0.8723|± |0.0187|
+| | |pqem |._models_Llama3-8B |0.9003|± |0.0167|
+| | |qem |._models_Llama3-8B |0.8723|± |0.0187|
+|mm\|arc_challenge\|0 | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.3458|± |0.0266|
+| | | |._merged_llama_llama_linear_3 |0.7259|± |0.0249|
+| | | |._merged_llama_llama_linear_5 |0.8692|± |0.0189|
+| | | |._merged_llama_llama_linear_7 |0.9190|± |0.0153|
+| | | |._merged_llama_llama_linear_9 |0.9097|± |0.0160|
+| | | |._merged_llama_llama_ties_1 |0.6168|± |0.0272|
+| | | |._merged_llama_llama_ties_3 |0.8723|± |0.0187|
+| | | |._merged_llama_llama_ties_5 |0.9097|± |0.0160|
+| | | |._merged_llama_llama_ties_7 |0.9003|± |0.0167|
+| | | |._merged_llama_llama_ties_9 |0.8879|± |0.0176|
+| | | |._models_R1-Llama3-8B |0.9657|± |0.0102|
+|mm\|arc_easy_c\|0 | 0|em |._models_Llama3-8B |0.9485|± |0.0077|
+| | |pem |._models_Llama3-8B |0.9485|± |0.0077|
+| | |pqem |._models_Llama3-8B |0.9653|± |0.0063|
+| | |qem |._models_Llama3-8B |0.9485|± |0.0077|
+|mm\|arc_easy\|0 | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.4192|± |0.0171|
+| | | |._merged_llama_llama_linear_3 |0.7796|± |0.0144|
+| | | |._merged_llama_llama_linear_5 |0.9281|± |0.0089|
+| | | |._merged_llama_llama_linear_7 |0.9485|± |0.0077|
+| | | |._merged_llama_llama_linear_9 |0.9545|± |0.0072|
+| | | |._merged_llama_llama_ties_1 |0.7162|± |0.0156|
+| | | |._merged_llama_llama_ties_3 |0.9461|± |0.0078|
+| | | |._merged_llama_llama_ties_5 |0.9425|± |0.0081|
+| | | |._merged_llama_llama_ties_7 |0.9341|± |0.0086|
+| | | |._merged_llama_llama_ties_9 |0.9461|± |0.0078|
+| | | |._models_R1-Llama3-8B |0.9868|± |0.0039|
+|mm\|commonsenseqa_c\|0| 0|em |._models_Llama3-8B |0.7809|± |0.0246|
+| | |pem |._models_Llama3-8B |0.7809|± |0.0246|
+| | |pqem |._models_Llama3-8B |0.8269|± |0.0225|
+| | |qem |._models_Llama3-8B |0.7951|± |0.0240|
+|mm\|commonsenseqa\|0 | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.1979|± |0.0237|
+| | | |._merged_llama_llama_linear_3 |0.6572|± |0.0283|
+| | | |._merged_llama_llama_linear_5 |0.7173|± |0.0268|
+| | | |._merged_llama_llama_linear_7 |0.8233|± |0.0227|
+| | | |._merged_llama_llama_linear_9 |0.8163|± |0.0231|
+| | | |._merged_llama_llama_ties_1 |0.5336|± |0.0297|
+| | | |._merged_llama_llama_ties_3 |0.8127|± |0.0232|
+| | | |._merged_llama_llama_ties_5 |0.8198|± |0.0229|
+| | | |._merged_llama_llama_ties_7 |0.8127|± |0.0232|
+| | | |._merged_llama_llama_ties_9 |0.8092|± |0.0234|
+| | | |._models_R1-Llama3-8B |0.8869|± |0.0189|
+|mm\|gpqa_diamond_c\|0 | 1|em |._models_Llama3-8B |0.0000| | |
+| | |pem |._models_Llama3-8B |0.0000| | |
+| | |pqem |._models_Llama3-8B |0.0000| | |
+| | |qem |._models_Llama3-8B |0.0000| | |
+|mm\|gpqa_diamond\|0 | 2|sem |._merged_llama_llama_dare_linear_1|0.0000| | |
+| | | |._merged_llama_llama_dare_linear_3|0.0000| | |
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000| | |
+| | | |._merged_llama_llama_dare_linear_9|0.0000| | |
+| | | |._merged_llama_llama_linear_1 |0.0000| | |
+| | | |._merged_llama_llama_linear_3 |0.0000| | |
+| | | |._merged_llama_llama_linear_5 |0.0000| | |
+| | | |._merged_llama_llama_linear_7 |0.0000| | |
+| | | |._merged_llama_llama_linear_9 |0.0000| | |
+| | | |._merged_llama_llama_ties_1 |0.0000| | |
+| | | |._merged_llama_llama_ties_3 |0.0000| | |
+| | | |._merged_llama_llama_ties_5 |0.0000| | |
+| | | |._merged_llama_llama_ties_7 |0.0000| | |
+| | | |._merged_llama_llama_ties_9 |0.0000| | |
+| | | |._models_R1-Llama3-8B |0.0000| | |
+|mm\|gsm8k_c\|0 | 0|math_pass@1:1_samples|._models_Llama3-8B |0.4112|± |0.0206|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.1863|± |0.0163|
+| | | |._merged_llama_llama_linear_3 |0.6977|± |0.0193|
+| | | |._merged_llama_llama_linear_5 |0.8137|± |0.0163|
+| | | |._merged_llama_llama_linear_7 |0.7610|± |0.0179|
+| | | |._merged_llama_llama_linear_9 |0.7680|± |0.0177|
+| | | |._merged_llama_llama_ties_1 |0.6749|± |0.0197|
+| | | |._merged_llama_llama_ties_3 |0.7030|± |0.0192|
+| | | |._merged_llama_llama_ties_5 |0.7188|± |0.0189|
+| | | |._merged_llama_llama_ties_7 |0.7522|± |0.0181|
+| | | |._merged_llama_llama_ties_9 |0.7311|± |0.0186|
+| | | |._models_R1-Llama3-8B |0.7592|± |0.0179|
+|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|._models_Llama3-8B |0.3830|± |0.0717|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.0213|± |0.0213|
+| | | |._merged_llama_llama_linear_3 |0.6809|± |0.0687|
+| | | |._merged_llama_llama_linear_5 |0.8936|± |0.0455|
+| | | |._merged_llama_llama_linear_7 |0.8936|± |0.0455|
+| | | |._merged_llama_llama_linear_9 |0.9574|± |0.0298|
+| | | |._merged_llama_llama_ties_1 |0.7660|± |0.0624|
+| | | |._merged_llama_llama_ties_3 |0.7872|± |0.0603|
+| | | |._merged_llama_llama_ties_5 |0.8936|± |0.0455|
+| | | |._merged_llama_llama_ties_7 |0.9787|± |0.0213|
+| | | |._merged_llama_llama_ties_9 |0.9362|± |0.0360|
+| | | |._models_R1-Llama3-8B |0.9149|± |0.0411|
+|mm\|truthfulqa_c\|0 | 0|em |._models_Llama3-8B |0.6190|± |0.0533|
+| | |pem |._models_Llama3-8B |0.6190|± |0.0533|
+| | |pqem |._models_Llama3-8B |0.7381|± |0.0483|
+| | |qem |._models_Llama3-8B |0.6190|± |0.0533|
+|mm\|truthfulqa\|0 | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.0119|± |0.0119|
+| | | |._merged_llama_llama_linear_3 |0.6190|± |0.0533|
+| | | |._merged_llama_llama_linear_5 |0.6667|± |0.0517|
+| | | |._merged_llama_llama_linear_7 |0.7381|± |0.0483|
+| | | |._merged_llama_llama_linear_9 |0.7857|± |0.0450|
+| | | |._merged_llama_llama_ties_1 |0.4167|± |0.0541|
+| | | |._merged_llama_llama_ties_3 |0.6548|± |0.0522|
+| | | |._merged_llama_llama_ties_5 |0.7024|± |0.0502|
+| | | |._merged_llama_llama_ties_7 |0.7500|± |0.0475|
+| | | |._merged_llama_llama_ties_9 |0.7262|± |0.0489|
+| | | |._models_R1-Llama3-8B |0.7500|± |0.0475|
diff --git a/merge_llama/logs/show_results1.log b/merge_llama/logs/show_results1.log
new file mode 100644
index 0000000000000000000000000000000000000000..7f34929e3db5b4c91062190d8546cd3662d1bfb2
--- /dev/null
+++ b/merge_llama/logs/show_results1.log
@@ -0,0 +1,105 @@
+| Task |Version| Metric | Model |Value | |Stderr|
+|--------------------|------:|---------------------|----------------------------------|-----:|---|-----:|
+|arc_challenge | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.3458|± |0.0266|
+| | | |._merged_llama_llama_linear_3 |0.7259|± |0.0249|
+| | | |._merged_llama_llama_linear_5 |0.8692|± |0.0189|
+| | | |._merged_llama_llama_linear_7 |0.9190|± |0.0153|
+| | | |._merged_llama_llama_linear_9 |0.9097|± |0.0160|
+| | | |._merged_llama_llama_ties_1 |0.6168|± |0.0272|
+| | | |._merged_llama_llama_ties_3 |0.8723|± |0.0187|
+| | | |._merged_llama_llama_ties_5 |0.9097|± |0.0160|
+| | | |._merged_llama_llama_ties_7 |0.9003|± |0.0167|
+| | | |._merged_llama_llama_ties_9 |0.8879|± |0.0176|
+| | | |._models_R1-Llama3-8B |0.9657|± |0.0102|
+| | | |._models_Llama3-8B |0.8723|± |0.0187|
+|arc_easy | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.4192|± |0.0171|
+| | | |._merged_llama_llama_linear_3 |0.7796|± |0.0144|
+| | | |._merged_llama_llama_linear_5 |0.9281|± |0.0089|
+| | | |._merged_llama_llama_linear_7 |0.9485|± |0.0077|
+| | | |._merged_llama_llama_linear_9 |0.9545|± |0.0072|
+| | | |._merged_llama_llama_ties_1 |0.7162|± |0.0156|
+| | | |._merged_llama_llama_ties_3 |0.9461|± |0.0078|
+| | | |._merged_llama_llama_ties_5 |0.9425|± |0.0081|
+| | | |._merged_llama_llama_ties_7 |0.9341|± |0.0086|
+| | | |._merged_llama_llama_ties_9 |0.9461|± |0.0078|
+| | | |._models_R1-Llama3-8B |0.9868|± |0.0039|
+| | | |._models_Llama3-8B |0.9485|± |0.0077|
+|commonsenseqa | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.1979|± |0.0237|
+| | | |._merged_llama_llama_linear_3 |0.6572|± |0.0283|
+| | | |._merged_llama_llama_linear_5 |0.7173|± |0.0268|
+| | | |._merged_llama_llama_linear_7 |0.8233|± |0.0227|
+| | | |._merged_llama_llama_linear_9 |0.8163|± |0.0231|
+| | | |._merged_llama_llama_ties_1 |0.5336|± |0.0297|
+| | | |._merged_llama_llama_ties_3 |0.8127|± |0.0232|
+| | | |._merged_llama_llama_ties_5 |0.8198|± |0.0229|
+| | | |._merged_llama_llama_ties_7 |0.8127|± |0.0232|
+| | | |._merged_llama_llama_ties_9 |0.8092|± |0.0234|
+| | | |._models_R1-Llama3-8B |0.8869|± |0.0189|
+| | | |._models_Llama3-8B |0.7809|± |0.0246|
+|gsm8k | 0|math_pass@1:1_samples|._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.1863|± |0.0163|
+| | | |._merged_llama_llama_linear_3 |0.6977|± |0.0193|
+| | | |._merged_llama_llama_linear_5 |0.8137|± |0.0163|
+| | | |._merged_llama_llama_linear_7 |0.7610|± |0.0179|
+| | | |._merged_llama_llama_linear_9 |0.7680|± |0.0177|
+| | | |._merged_llama_llama_ties_1 |0.6749|± |0.0197|
+| | | |._merged_llama_llama_ties_3 |0.7030|± |0.0192|
+| | | |._merged_llama_llama_ties_5 |0.7188|± |0.0189|
+| | | |._merged_llama_llama_ties_7 |0.7522|± |0.0181|
+| | | |._merged_llama_llama_ties_9 |0.7311|± |0.0186|
+| | | |._models_R1-Llama3-8B |0.7592|± |0.0179|
+| | | |._models_Llama3-8B |0.4112|± |0.0206|
+|math_500 | 3|math_pass@1:1_samples|._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.0213|± |0.0213|
+| | | |._merged_llama_llama_linear_3 |0.6809|± |0.0687|
+| | | |._merged_llama_llama_linear_5 |0.8936|± |0.0455|
+| | | |._merged_llama_llama_linear_7 |0.8936|± |0.0455|
+| | | |._merged_llama_llama_linear_9 |0.9574|± |0.0298|
+| | | |._merged_llama_llama_ties_1 |0.7660|± |0.0624|
+| | | |._merged_llama_llama_ties_3 |0.7872|± |0.0603|
+| | | |._merged_llama_llama_ties_5 |0.8936|± |0.0455|
+| | | |._merged_llama_llama_ties_7 |0.9787|± |0.0213|
+| | | |._merged_llama_llama_ties_9 |0.9362|± |0.0360|
+| | | |._models_R1-Llama3-8B |0.9149|± |0.0411|
+| | | |._models_Llama3-8B |0.3830|± |0.0717|
+|truthfulqa | 0|sem |._merged_llama_llama_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_llama_llama_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_llama_llama_linear_1 |0.0119|± |0.0119|
+| | | |._merged_llama_llama_linear_3 |0.6190|± |0.0533|
+| | | |._merged_llama_llama_linear_5 |0.6667|± |0.0517|
+| | | |._merged_llama_llama_linear_7 |0.7381|± |0.0483|
+| | | |._merged_llama_llama_linear_9 |0.7857|± |0.0450|
+| | | |._merged_llama_llama_ties_1 |0.4167|± |0.0541|
+| | | |._merged_llama_llama_ties_3 |0.6548|± |0.0522|
+| | | |._merged_llama_llama_ties_5 |0.7024|± |0.0502|
+| | | |._merged_llama_llama_ties_7 |0.7500|± |0.0475|
+| | | |._merged_llama_llama_ties_9 |0.7262|± |0.0489|
+| | | |._models_R1-Llama3-8B |0.7500|± |0.0475|
+| | | |._models_Llama3-8B |0.6190|± |0.0533|
+
diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet
deleted file mode 100644
index 2ae7499654875fbcc60e33584f5f3ede76b2d5e5..0000000000000000000000000000000000000000
--- a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b55aaeeba0dd0ff818bbb560214b6cf8b6607d0df7cae8f6f7404b0e1b602ea5
-size 3439334
diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet
deleted file mode 100644
index cca966f167e5e85d27c8fe5ed42e839276c6dcbf..0000000000000000000000000000000000000000
--- a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dd270497f60edab7f8d30354ec1ee36495eed4822ef283317a26e5761e858762
-size 8603100
diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet
deleted file mode 100644
index 0fb0611ef34498f831de76b10cd2d83e99258cc0..0000000000000000000000000000000000000000
--- a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:db08c7c9894d0a1b9899479d03a9469b2e4e70a3c07772f6344e24c8cd0ae498
-size 3185244
diff --git
a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index caf8ccffc4256e1da9e008567eeaa30e9c275d0d..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:93265f65cfccf8efd3b8b4a8d1f4b683f29129fdfe72c80f7a6cb2126ed357b9 -size 40011 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 1bf9ecc1b094effc7891f42c71b753033ea763dd..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d2217cc559a3aed3abfabd226ba6593ff2e3815d114e10c3c87d96e4b1e98f6d -size 4072264 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2f599a1ad1902a889f240486416041cb973f88d1..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8c311267e3c15058995fe4961f115fc4595bfb8fe578679503a5410fc8e0d296 -size 543841 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 50b4e2cfe71c077237a54743b9a3e1e41b1dcdff..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a12d01d16541c902c23e32a5988b20cf681f341ee431c625138ab3dfa954cc37 -size 1021703 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2ae7499654875fbcc60e33584f5f3ede76b2d5e5..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b55aaeeba0dd0ff818bbb560214b6cf8b6607d0df7cae8f6f7404b0e1b602ea5 -size 3439334 diff --git 
a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index cca966f167e5e85d27c8fe5ed42e839276c6dcbf..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dd270497f60edab7f8d30354ec1ee36495eed4822ef283317a26e5761e858762 -size 8603100 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 0fb0611ef34498f831de76b10cd2d83e99258cc0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db08c7c9894d0a1b9899479d03a9469b2e4e70a3c07772f6344e24c8cd0ae498 -size 3185244 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index caf8ccffc4256e1da9e008567eeaa30e9c275d0d..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:93265f65cfccf8efd3b8b4a8d1f4b683f29129fdfe72c80f7a6cb2126ed357b9 -size 40011 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 1bf9ecc1b094effc7891f42c71b753033ea763dd..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d2217cc559a3aed3abfabd226ba6593ff2e3815d114e10c3c87d96e4b1e98f6d -size 4072264 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2f599a1ad1902a889f240486416041cb973f88d1..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8c311267e3c15058995fe4961f115fc4595bfb8fe578679503a5410fc8e0d296 -size 543841 diff --git 
a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 50b4e2cfe71c077237a54743b9a3e1e41b1dcdff..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a12d01d16541c902c23e32a5988b20cf681f341ee431c625138ab3dfa954cc37 -size 1021703 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2ae7499654875fbcc60e33584f5f3ede76b2d5e5..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b55aaeeba0dd0ff818bbb560214b6cf8b6607d0df7cae8f6f7404b0e1b602ea5 -size 3439334 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index cca966f167e5e85d27c8fe5ed42e839276c6dcbf..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dd270497f60edab7f8d30354ec1ee36495eed4822ef283317a26e5761e858762 -size 8603100 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 0fb0611ef34498f831de76b10cd2d83e99258cc0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db08c7c9894d0a1b9899479d03a9469b2e4e70a3c07772f6344e24c8cd0ae498 -size 3185244 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index caf8ccffc4256e1da9e008567eeaa30e9c275d0d..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:93265f65cfccf8efd3b8b4a8d1f4b683f29129fdfe72c80f7a6cb2126ed357b9 -size 40011 diff 
--git a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 1bf9ecc1b094effc7891f42c71b753033ea763dd..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d2217cc559a3aed3abfabd226ba6593ff2e3815d114e10c3c87d96e4b1e98f6d -size 4072264 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2f599a1ad1902a889f240486416041cb973f88d1..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8c311267e3c15058995fe4961f115fc4595bfb8fe578679503a5410fc8e0d296 -size 543841 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 50b4e2cfe71c077237a54743b9a3e1e41b1dcdff..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a12d01d16541c902c23e32a5988b20cf681f341ee431c625138ab3dfa954cc37 -size 1021703 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2ae7499654875fbcc60e33584f5f3ede76b2d5e5..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b55aaeeba0dd0ff818bbb560214b6cf8b6607d0df7cae8f6f7404b0e1b602ea5 -size 3439334 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index cca966f167e5e85d27c8fe5ed42e839276c6dcbf..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dd270497f60edab7f8d30354ec1ee36495eed4822ef283317a26e5761e858762 -size 8603100 diff --git 
a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 0fb0611ef34498f831de76b10cd2d83e99258cc0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db08c7c9894d0a1b9899479d03a9469b2e4e70a3c07772f6344e24c8cd0ae498 -size 3185244 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index caf8ccffc4256e1da9e008567eeaa30e9c275d0d..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:93265f65cfccf8efd3b8b4a8d1f4b683f29129fdfe72c80f7a6cb2126ed357b9 -size 40011 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 1bf9ecc1b094effc7891f42c71b753033ea763dd..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d2217cc559a3aed3abfabd226ba6593ff2e3815d114e10c3c87d96e4b1e98f6d -size 4072264 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2f599a1ad1902a889f240486416041cb973f88d1..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8c311267e3c15058995fe4961f115fc4595bfb8fe578679503a5410fc8e0d296 -size 543841 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 50b4e2cfe71c077237a54743b9a3e1e41b1dcdff..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a12d01d16541c902c23e32a5988b20cf681f341ee431c625138ab3dfa954cc37 -size 1021703 diff --git 
a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2ae7499654875fbcc60e33584f5f3ede76b2d5e5..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b55aaeeba0dd0ff818bbb560214b6cf8b6607d0df7cae8f6f7404b0e1b602ea5 -size 3439334 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index cca966f167e5e85d27c8fe5ed42e839276c6dcbf..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dd270497f60edab7f8d30354ec1ee36495eed4822ef283317a26e5761e858762 -size 8603100 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 0fb0611ef34498f831de76b10cd2d83e99258cc0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db08c7c9894d0a1b9899479d03a9469b2e4e70a3c07772f6344e24c8cd0ae498 -size 3185244 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index caf8ccffc4256e1da9e008567eeaa30e9c275d0d..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:93265f65cfccf8efd3b8b4a8d1f4b683f29129fdfe72c80f7a6cb2126ed357b9 -size 40011 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 1bf9ecc1b094effc7891f42c71b753033ea763dd..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d2217cc559a3aed3abfabd226ba6593ff2e3815d114e10c3c87d96e4b1e98f6d -size 4072264 diff --git 
a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2f599a1ad1902a889f240486416041cb973f88d1..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8c311267e3c15058995fe4961f115fc4595bfb8fe578679503a5410fc8e0d296 -size 543841 diff --git a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 50b4e2cfe71c077237a54743b9a3e1e41b1dcdff..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a12d01d16541c902c23e32a5988b20cf681f341ee431c625138ab3dfa954cc37 -size 1021703 diff --git a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index df74857dccacc6837e24411f0b0893817d090153..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fc14f9129b332275e019d50acdf8acff63018d1dba3045d36ebfbd9dbe2b8527 -size 3710976 diff --git a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index dde78c37dfc5abd081657baef85db9b76233109d..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d32cdf1e6c7c248360cc4b6be2a52de389b8e8a9ff50f66493971a10d046c9f2 -size 9339744 diff --git a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index ff10e73c3ac597a3cbf2c6e45ec687fb42abc65b..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4281331c4ef39aa9b7f866926e9232cecb484b282ea3f1828d8a0a11374b7e46 -size 3410502 diff --git 
a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 8ceca97f3ea5264c1b8635cd4cd8c8f6147e6d65..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c0c5da1837e9987618b06b4417b632b80e64b7d9cd394ea2bf700320096ae384 -size 33282 diff --git a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 594c0314e4382d01fc3325e7bee253a65b767578..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b0770d7894a7a286a2d0a0b795c18b6e75fa6be9bb53cddcef4cff7abba4ce49 -size 4427912 diff --git a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 86323ad11739dfba7529a14a17e6f4f3bf9cb5f2..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4aad6dfed2357e3beea15ef61c8675105bf0af3360ceea76516c53f12d4eacad -size 570051 diff --git a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index a8a7523a620081da504717af6af2a09e83e1c731..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:322a7fb5fa3edc1303d7f70a542300985e9844d0deb4ca592d551bfaa2e4a47e -size 1092586 diff --git a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 12b481a45556939f772af704004859385d3d6804..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:892e718268a7765ca5c23d66c4f7d21d115288739885c30c0c5a243979cb6e61 -size 3724091 diff --git 
a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index c8a5c58dd3e8847c28b28c65f9678d4603d29848..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fdfc697a313fc4318e12a4cfdacce90d02a847996244f178c8596551f4ff88d4 -size 9420131 diff --git a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index eb22c60d492facb1daab8b3fab807a11c2261795..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ae5ae540719b2954860eeeb45d2dbee144cd20970895667a4a4a14e98207e396 -size 3425592 diff --git a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 7de17ddfc2d2bb035f4defaf8732c98a3d0e21b0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ebc990c7ebdd8c6b2560d934b84efc72ef63d88a071a8ea7839d9f00c9644d96 -size 33418 diff --git a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2fdf29a867b1325c97c340a318e5081f14fbecb9..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:874e33afbf3648fa5b946287231525165eba6eb91962e6826f3be0beddc73bbc -size 4462817 diff --git a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 39c8925d624af10e608782d13877a9aa3255aa3e..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:9a13072759cc772002cd2ab9b92db490f5fde96ae6038aaf851b2a2cf9ae4aaa -size 569194 diff --git 
a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2c76fadec2bb09220e848762dde234f1d00d82df..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:334a26515bc1a8fe92fa2288faa9a9a84d136704dc6d17610515c6ca265f0de2 -size 1094284 diff --git a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index f18a4e3d8b9ddb9d1877ac3f5cb9f4c80c8de87a..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7dc28ed3ce0bef44ae8ce7608ec6a55e04e76f7e67b9d8a56959315df2eb8f4f -size 3873425 diff --git a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index eb9c101efe8e420e649328c78da0d6f2ec1f08d3..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7e0ad65eac1296ec6e13b13559ff5733835750e2742f189d738de8438651b619 -size 9701867 diff --git a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 4f8a76e47f5bea936c71e376e3b7a5de13f7cc90..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:601e2fd7aecd3b49965f341cf36fd4454ea33e7c126bdde6cc7c91d891f7d41d -size 3536856 diff --git a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 927964f50cb6aff94b4f0e89c68af93bf7fd45e0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:657703be1c2ab5625dde27316f55c0d5e44f8b98ad1df51562761fc8343e80f2 -size 34175 diff --git 
a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 8dce2dbd7fcbaa39179be4408fe424ea113cbde7..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5aa46a556ac38a5620aa2db3516a42cf41872ed721179b152eeac2ff51b2c64b -size 4745705 diff --git a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 5c270904f9adef48fb1b9d358d7e870c5066a156..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4bc70d6ca26e649ce89e95a276a783974c0f528d92afd1271cffbed16c056eb3 -size 590415 diff --git a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 8e9e2a33b9082c8563871a87f97dd5f0cbce16bc..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d323ecf02f968588c196fd0e0337dbd8f7f580babbb92a5b5ff875dfcb4a491f -size 1127057 diff --git a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index aec6d211f96fd02c8118bd492bbb1066b0566c98..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:188a18bcae54a72fa4200da4413245a5f693f1957bc9156da5463746adfdab10 -size 3862193 diff --git a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index d276bc73ecc2a594f3b0d5be826e81f326462bc0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b86236ad1417b5eb375ab36a74f886c6ac93560f695d4f4d9b855211d39a4c46 -size 9703368 diff --git 
a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 4a7e4ca6468b831afcde3aa0035a9af2f1e09c58..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5f77d210fbc92280f9e55df7007f49af0fdb7a83a29fe1e6decdf031a0357433 -size 3523645 diff --git a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 716413e4572792f529ac8673568645abcf218aee..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:36155c1b91d813587240e3badb34f6aa51964c89b436e51285363f5b3f987c49 -size 34827 diff --git a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 47d18cd723a6d1c702033fb6f1037ad31da7125b..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:83ecb273a8ceeac087fc78e3664b75e83c672ec660eea34c914ff9713c673206 -size 4695839 diff --git a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 5cdd4ec0623934f6019561002c65e65ed8050cfd..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4f0a492fcceca4d90738e21922bc79ffed354d72e0463d64785fe2e6cfbb5bc1 -size 587356 diff --git a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index e4ea5b5e9f8ec74d8d8e74210bfc75f579d37de5..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:970d27d9bbc6be926bde1a46876b08491d9359647914dd7c144213ebfa39226f -size 1128475 diff --git 
a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index e0f5a4a2f1fe22236aaff2996c332c34e4071bed..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dda020a994ea5b6d6428977c6de20ccd5a6e7907b94677d5b9816e448dabf01d -size 3931570 diff --git a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index bd99dd836c1e6c318dcde8ef954f4f3e5e074142..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0e8a0f3e989aa946b873c33f9bc9ebb154ce2a7e26979103ce3897c98a660d2b -size 9841646 diff --git a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index ae0defb876b20d841f730a81d5e0e51dc4e71e70..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:977e87511c561b87358d44d0dc18345f5544953d93c215bed26814f17cc610d7 -size 3564329 diff --git a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 1412298d659024d894ecc5c88cb9c3f7df579a95..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:883ca8e7e4bac3ea8bb75c73dcedb059356ffc1eff9764a33081179355537dcd -size 34241 diff --git a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 700e4b59f0ae7695d83c457e62bfa6ef1d07a4cd..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ec1707e233bcae02ce41250d85a3a558f80707d2fe941dd0b7c54bc4a7a688f8 -size 4784906 diff --git 
a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index b06257da037c31dbd3ab8da916ed1a62af27685b..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:db24c203d293b37e2fd3b7d1d723f9624c5494b4040288a678997caed1c7b43a -size 593238 diff --git a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index c8ab8e3f81a1bc24f7ac0c8b6d40dcd233fb5262..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ebb6baf8443180417912df4d604626be6fe274125ff06ced41c7eb83ace968f5 -size 1137117 diff --git a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 69b37efff3e4a31800eeda3eea1efedbc8b586f0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cf67c4b12b3f2289367a3f7bca36acf632a18b695042ae8a7bc27a39350d42bf -size 3709936 diff --git a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index cb9b75a1c1c0e09fd225d384346d5cea693515a3..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0bbfae0648604953331ca6daebfdb1e56f1fb6718905844dcb8b6387e9174c3b -size 9311795 diff --git a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 7c8616904b039efe0edd1cef01229abd06e17681..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:58301106bb373e03f0ad865a849b681f0ad6671c09b552c312f4c51d691cbbeb -size 3407444 diff --git 
a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index a84ad92521b9d1bc43ed809e9f7b669c49172a25..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:06824f85e3663fcbadf70151f1afd79cb67f47e70e098c8f01dbf6cfd08b656a -size 33455 diff --git a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index d17f8bbe9d94c6cccfdfc8dced2dd920f0835211..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:59fa55bc8e3ca995e4103d8eaca5d4ce6e2ac40ea22a6c883202d1b56d913408 -size 4482855 diff --git a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 067d8dd78fb02d8e65c474ca20a2d27fc8170774..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7921a6b41fb371fcc5edf2e48278c8dce2eedcaf35ee44cffd007f73e5cf81fb -size 569423 diff --git a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index e1709debbf4d8e27f4ca8d3c6d2a030a25cbdb58..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8fcd66d387b93b10cb53703f999ef9cdb30a467015cea6755711ad930aa54319 -size 1086998 diff --git a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 84cff2e6edca5ad1163fad0ba2c705af75bb194e..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4de58abb56d365ba60cb376b953ac4bac9c855e9004da189b162d3c92a44efbf -size 3918266 diff --git a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet 
b/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 9a4545a36eb74764506c635d4a361b4b27932af2..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:56d38046b3932859b4472daa3f0c5d58c5812adfb41aa46836e5be77df170aab -size 9790801 diff --git a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 0498fcde265f2df167d47f6e5012463367cb417f..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:08e8fcd82b479ba401abd93fb9acba622ad3822538e40920b8d421d4c87b88ef -size 3580587 diff --git a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 08f888f9fc6623f9b2ead533447088b4934a2a6b..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aaa19bbf75baa8ee293e1776e52bbf52800452e279b286310566ff88643eb56c -size 34531 diff --git a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index af63bf63ae9941ecd5a7580d0e1388a7816d6444..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:21473952e9362db4ae431e679f327bb6bbd0e13344b09a4e62624062637fd35c -size 4609072 diff --git a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index a173481a80091ff299ca4e2373352bbb332b8e33..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:cc0080be2a617e157471bff79d7ceda5c3e1c4798993b081ce688ebb61f008c6 -size 603607 diff --git a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet 
deleted file mode 100644 index afb0590f72ffbb758a13c345e882109f48764a67..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:d5321a6390adcdfdcef4dec8d103c8b612be03004528cee971abdc844f707354 -size 1145514 diff --git a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2426410d5cade3796041f790a1234ae941c0be3f..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2903e9197bea24084948b1ad8e782a0d22c796af8452ef48d015f4d2ffebe6c0 -size 3901292 diff --git a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 66b1d2b75805ee63eeacc581bab96d7dff643c82..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2f87f2b81dea5d6f3848d386b9fdd4bafd495ddd172c76923dad2333f6d3ce06 -size 9782284 diff --git a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 5509cf587da485e733ecbe440a95e916d59109dc..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:da2b4300911a02b312b5cdfd7e49e2d6ec798d4152e715df63357477507b7da0 -size 3605068 diff --git a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 95601b2f980b872ea38778eded2bfadd8326b641..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:10612214625a24a504ed3a2a18e756446879f349ffd797e115e627651501dfa8 -size 34248 diff --git a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index bf0a9db5b8864af06cc2cff6af229ae4753ea581..0000000000000000000000000000000000000000 
--- a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:267d384d007bf867c33eb3359fa5fa2767b2f49e7c87fe44ab555c17b10c8312 -size 4700225 diff --git a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 466cb0eaf1140a49d98dfe6ecb0add942082a42b..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ec6f3728e0f0f2079f79552825e21708fd0d634d7e90b6a6c00ff5ba4083c03c -size 587075 diff --git a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 67e484cb9d5ab9d423137ed78119f1c9a50c04f1..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:dcafc005cc4fcedb246737bed407a7b00fa97617d8de786699123c62066b5ae3 -size 1144240 diff --git a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index af3cb0b107f65bbfed64e3612b58eb4bd28bcb8c..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8f603e5c88979fd8d28e979951e1680d2c6a820df79481f0727eff1a855b88c8 -size 3987372 diff --git a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 669c59d21de92b082534d97a4b142e3eea0508c9..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5b480f9ed01e84673dca485d73f9e2130308c43500b4bd43acfc3f0998669a4b -size 9976409 diff --git a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index fa3bbb779a55c8252b890e4a1f147bd2a7eed6a1..0000000000000000000000000000000000000000 --- 
a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:078ccbc0cb3d049881ac3ceadd14f2c08efc42ae8168ce1692f9491eda650ed1 -size 3594641 diff --git a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index bd4c10b4db5681461a6061c95abca70cb5933c0c..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:e4acae5b961c53091fd041c1dd5ef0520d5351b7a5813151c8bc0edf49a9d9b5 -size 33422 diff --git a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index bee14d1b521a9e4943282e9140caf0ff6fcad8b9..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:01457124b67fe3854051c19361781912ba45ce0ee51bf1ea9a16db9f423cf722 -size 4895455 diff --git a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 82f27c6163c6b792eaf532b3f025470699223076..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:168a4eeaf8682064d809e9156136b487f30d0240d0d8ebb7fca5603fe735a5f7 -size 599860 diff --git a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 3e1c134c5e22e1f2bb65a0dd6522b76137953bf9..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:87e4a18ade27c86b8b49599e289a0e9562b956b9f3c12acf2f71b71e70b55bea -size 1156924 diff --git a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 75520188d0e3ae79218da6e41ad7f73f9d9f3401..0000000000000000000000000000000000000000 --- 
a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:85049021e9a163f5b8ee264ed71ee7a7c1d371c0fd44b7f8f81595a72bdeaca8 -size 4009370 diff --git a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 8018b0d4f57ac30d4e0ea1d295bb37523640b41f..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:84e36f2524e94edc993cc1b4a90610259c5560ee3b875f8f791ec6d2d4373546 -size 10064310 diff --git a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2fa8f6717f35f1f38b69034f3cfb320d067b2d50..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b1a97b495307739803fbfeb75ea7c8511c62c2e94e8d4c682088aba22c882fe5 -size 3640345 diff --git a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 2c30068dc5dd507043ac5952c717d55dfdcb1b28..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6698c546d0abfd80a6270d7a551ab182de02eda79f1f1ced662c90ec0a5f256f -size 34838 diff --git a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 8a3a5c4b21b17f75228b2a12bc5daae5be078fa0..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b2f08ec1929ebe2c957ba07c8bc1612bcb4b3c932ec165d0a7008cac1d45aab5 -size 4974892 diff --git a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 1d20eb38d793159f80bb4441773138c30ff05e40..0000000000000000000000000000000000000000 --- 
a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:7cc3c85d6125137856a0c07344ebe04f3fee4449a480522b70ecb0fa2e95400d -size 605694 diff --git a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet deleted file mode 100644 index 64f5470e39a2dd36a422739b7a310b362951098c..0000000000000000000000000000000000000000 --- a/merge_llama/outputs/._merged2_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:fe85b9d27958839c6e484d51ff49f0d756981e9af4511b8e7e44ddd6818306b6 -size 1158233 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c0640cfbb58f761df90c126415a6cc1512abd --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d30e40c0f984de190025257c902ce815ffef441054726490940b2931efc8b15 +size 3439462 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..34054af6db3496de1719c90558b6e129ccc2e4fe --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02f479af393414370918b6978a2d9ba9ae97fa7d9828378684f4e03c6c33c17 +size 8603205 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54c11a77ce8edc7cbb77107ee486b4470c5509ae --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69fd7d3f09932c44f56dcd0826b6df51c854f5be99eec8a438d242a4ee0ef718 +size 3185325 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..409bfacbd05e5007a53060cfcd7bcc9dea34154c --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c83276f59d23f867626c84cf4bfbc45b8d46f7c449ecf5f80f5eac2b1cf50b8a +size 40079 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9a7c08e29c379722e4113972b1984b34fc8e3c9d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5946b15fd996ebe08c2d657276bba64f4cf3ffb14c98d8eb26f785db4370df9 +size 4072412 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..148f876e86abd6b9af2484a204be4c6108b804ba --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77d07765e39cf1b0ea795ab1e38209e0b84c9f6893378deb7cf80b1758fc0c35 +size 543946 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e47e2a85df0b999adb989fde24d6deaabf563f99 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593f7102d87f589bd0422daf6269f46254129a65f270d910e0ce508a32c2eeb4 +size 1021806 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c0640cfbb58f761df90c126415a6cc1512abd --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d30e40c0f984de190025257c902ce815ffef441054726490940b2931efc8b15 +size 3439462 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new 
file mode 100644 index 0000000000000000000000000000000000000000..34054af6db3496de1719c90558b6e129ccc2e4fe --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02f479af393414370918b6978a2d9ba9ae97fa7d9828378684f4e03c6c33c17 +size 8603205 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54c11a77ce8edc7cbb77107ee486b4470c5509ae --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69fd7d3f09932c44f56dcd0826b6df51c854f5be99eec8a438d242a4ee0ef718 +size 3185325 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..409bfacbd05e5007a53060cfcd7bcc9dea34154c --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c83276f59d23f867626c84cf4bfbc45b8d46f7c449ecf5f80f5eac2b1cf50b8a +size 40079 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9a7c08e29c379722e4113972b1984b34fc8e3c9d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5946b15fd996ebe08c2d657276bba64f4cf3ffb14c98d8eb26f785db4370df9 +size 4072412 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..148f876e86abd6b9af2484a204be4c6108b804ba --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77d07765e39cf1b0ea795ab1e38209e0b84c9f6893378deb7cf80b1758fc0c35 +size 543946 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet 
b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e47e2a85df0b999adb989fde24d6deaabf563f99 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593f7102d87f589bd0422daf6269f46254129a65f270d910e0ce508a32c2eeb4 +size 1021806 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c0640cfbb58f761df90c126415a6cc1512abd --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d30e40c0f984de190025257c902ce815ffef441054726490940b2931efc8b15 +size 3439462 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..34054af6db3496de1719c90558b6e129ccc2e4fe --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02f479af393414370918b6978a2d9ba9ae97fa7d9828378684f4e03c6c33c17 +size 8603205 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54c11a77ce8edc7cbb77107ee486b4470c5509ae --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69fd7d3f09932c44f56dcd0826b6df51c854f5be99eec8a438d242a4ee0ef718 +size 3185325 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..409bfacbd05e5007a53060cfcd7bcc9dea34154c --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c83276f59d23f867626c84cf4bfbc45b8d46f7c449ecf5f80f5eac2b1cf50b8a +size 40079 diff --git 
a/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9a7c08e29c379722e4113972b1984b34fc8e3c9d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5946b15fd996ebe08c2d657276bba64f4cf3ffb14c98d8eb26f785db4370df9 +size 4072412 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..148f876e86abd6b9af2484a204be4c6108b804ba --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77d07765e39cf1b0ea795ab1e38209e0b84c9f6893378deb7cf80b1758fc0c35 +size 543946 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e47e2a85df0b999adb989fde24d6deaabf563f99 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593f7102d87f589bd0422daf6269f46254129a65f270d910e0ce508a32c2eeb4 +size 1021806 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c0640cfbb58f761df90c126415a6cc1512abd --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d30e40c0f984de190025257c902ce815ffef441054726490940b2931efc8b15 +size 3439462 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..34054af6db3496de1719c90558b6e129ccc2e4fe --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02f479af393414370918b6978a2d9ba9ae97fa7d9828378684f4e03c6c33c17 
+size 8603205 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54c11a77ce8edc7cbb77107ee486b4470c5509ae --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69fd7d3f09932c44f56dcd0826b6df51c854f5be99eec8a438d242a4ee0ef718 +size 3185325 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..409bfacbd05e5007a53060cfcd7bcc9dea34154c --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c83276f59d23f867626c84cf4bfbc45b8d46f7c449ecf5f80f5eac2b1cf50b8a +size 40079 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9a7c08e29c379722e4113972b1984b34fc8e3c9d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5946b15fd996ebe08c2d657276bba64f4cf3ffb14c98d8eb26f785db4370df9 +size 4072412 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..148f876e86abd6b9af2484a204be4c6108b804ba --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77d07765e39cf1b0ea795ab1e38209e0b84c9f6893378deb7cf80b1758fc0c35 +size 543946 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e47e2a85df0b999adb989fde24d6deaabf563f99 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:593f7102d87f589bd0422daf6269f46254129a65f270d910e0ce508a32c2eeb4 +size 1021806 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c0640cfbb58f761df90c126415a6cc1512abd --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d30e40c0f984de190025257c902ce815ffef441054726490940b2931efc8b15 +size 3439462 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..34054af6db3496de1719c90558b6e129ccc2e4fe --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c02f479af393414370918b6978a2d9ba9ae97fa7d9828378684f4e03c6c33c17 +size 8603205 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54c11a77ce8edc7cbb77107ee486b4470c5509ae --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69fd7d3f09932c44f56dcd0826b6df51c854f5be99eec8a438d242a4ee0ef718 +size 3185325 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..409bfacbd05e5007a53060cfcd7bcc9dea34154c --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c83276f59d23f867626c84cf4bfbc45b8d46f7c449ecf5f80f5eac2b1cf50b8a +size 40079 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9a7c08e29c379722e4113972b1984b34fc8e3c9d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:c5946b15fd996ebe08c2d657276bba64f4cf3ffb14c98d8eb26f785db4370df9 +size 4072412 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..148f876e86abd6b9af2484a204be4c6108b804ba --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77d07765e39cf1b0ea795ab1e38209e0b84c9f6893378deb7cf80b1758fc0c35 +size 543946 diff --git a/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e47e2a85df0b999adb989fde24d6deaabf563f99 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_dare_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593f7102d87f589bd0422daf6269f46254129a65f270d910e0ce508a32c2eeb4 +size 1021806 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5cc6cd52aa717bd46ead1ae4797de2234d421c6c --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72e90cccc5925f52796153a688ee8f469a1bc00900f12d30e6f37d6370493c40 +size 3439725 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..996ebafb50135196f9ed9b2b36f993448d463225 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff367e4fd8c132623808f9ae8579c4616fd4a4e6942c7bfbf7aae5fe1ebdbf72 +size 8623069 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..639b7df0b24dc099e38e58fa6a7123eacb199ec7 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:23b39071f031572381554505e9bcb2c907173b6d75c7bb58834676dc5e9b8fe4 +size 3174951 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..39d78e4db6061633e83d95be3609b51b84932949 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9f957a80547086c6a59f5822de99df76d58bb206ecf6a413d306c5c5a154aed +size 32555 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..57b4ed8b94bdb510646890da48bbdc5f1c341583 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c85e67bcb83af4639be932e63e3d408d75425278715e09afea9b298e23744e3 +size 4120962 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d6bdfc00d9921b72ecde0e8706956edade910857 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:696462a104beeabaa0190260e7feb78f49e62f301dfef8d5c370cfd4acc5c0ea +size 535436 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8eabc328b2ad2652728236e9f93700cf9900324f --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25d7545457c365616aff10b0d466363f54681f35e6cbc8f0986117eca9c5c117 +size 1009147 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..523a0f09abfd653ff43b1234378068f3152522d8 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:f2966922aceb2e37d13aabbafb6b61d578fc59ad164811203210beb3280e4a22 +size 3645678 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cdbaa798a21a3ff073a5b91d361d6c7b56b0bc97 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6783c78c3607f996c126243524105134b47bbe250476acf94ab0421d54609eb +size 9148580 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a0147174d8135fe5557d115bae040eaca91c6ab0 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:710c0e707a4a752e9ee35b424d27c02b9e984e5180e34e13dcd438148a6b9f33 +size 3372240 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5e60074e1ee1b18ffb3229eda5694af69ec45712 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d2bdf7387fed0e92dfd369066cca1f3456f6d3325ac92ea7f961c9a93c34acd +size 41268 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2dfb50c63a34e56c026fc59ea612a276d661674c --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bd21ae8cbb58b8d68257af69db61ecde96005f7939107ec8652c70a22642d59 +size 4357404 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6d4d2cadd41a5a06162d3f5c57e126a9823ec1cd --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a9182ed797491eafefe6284885ee259f33a2c4190dcbe16f7dc8fd074538830c +size 568071 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8266c7e5b014ff36e1f692d34721dbe54e686ffd --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26c7b5225f0014ba7bcbc30a8c7d04aa61ceed2b3aca6588c95ca15c5c6b46fa +size 1070726 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f352455f45f575bf1fc3f685802fd41ab2b0c9ef --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c917e3b1ab1156e5096e8f18110f83c5f4afb10a8e04800daca6234c8947a206 +size 3733544 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7900983a5040d4316ece6f9b2a55a13b2972b355 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a32f954b214386376652e8a5e28e663769a4f0c51b852b6236238fcb18ca8f9 +size 9344236 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5feb504feadb522f919427125493b8449f398dd0 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae8cd0700b4a13d43956b90cbe8f93445cc29e697aaaf0535adf7d9bb86ea90a +size 3467855 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..00460ce8074124815483d81161a7f5d66129842d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b93a0052aeae72b0406bc813ec21c894d1f1c73458e0c167afaa7f42ff24ae21 +size 45007 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..19b72cfa0676be18c68f7f6b62f6b351b5c7c361 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:963206c25162ed4fe29873b7b1c15f1a2e2a4fb1c1311210452947aa5b8df840 +size 4462344 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d44572c0139119153b0fed511cd23553a2362444 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9995da154f408f650e59cefb22bcae474c912089f913e5bcf938e24c65ba6e1b +size 581342 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b488b7d1316289b536bc581629748dcd2858dcc2 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7243851c84be2dc71ccd9621b38163b1ae8796128851bce854c7a15b60140cf6 +size 1102542 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..73694efe4b53159a8fcf4cb1bc4ac6c9495b6fad --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c4fcd597c00b713e08668eec669ecbc7b032a62ba7c87b31f6efcc582451d22 +size 3824790 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6663a6dda9526af7a424c4c33c1bc33556b13cf2 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f7ca82a59964515e52f0232ec0f4a885040d64c50f5c318c10d14b95e71a17bd +size 9561981 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..17d351e5b0a91fdbaae88e8f6f3e14962803831f --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cdbf3c182aea88017305d86a1e5a5b78a3ac17481f751dbedd0b3214380a86a +size 3537564 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cd8522f2d8fe292a2d4258a0f15646cc3392194f --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0b955a67591a97dcdcff5fc51c05fdb1467bfc8a5c6b691fe5a88e8f0452912 +size 48777 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9bfce4f65f2bc896c59b012b0f2395835c8c2da3 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3260069502b058e5c27c2b822b9197fd195051388df5de0f67bb8a77698a1a79 +size 4373146 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e8fffae1cc7aa18d10ff8c629cefa79edffcc66 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15965a0cd38caf1f5928ea22236d676ef5c2cf87225d335889866c08a2e5344b +size 577151 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f0ef7b0a41af92792f268dfd5160696f6056eea1 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:90265ad0fde85395514fa686522b417aa806611b14d51d51e8791b2263e50976 +size 1139435 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2fe83f9f7d9b2a44d0e4d748eba3cce1f63530e9 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0be1cb80420a41599e522268fcdf32957c779ec9d1bfef64b946c7b1b08844c +size 3893297 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..47ad8378b561bcf43b2cac818a3c895c9fbbf664 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7aeda117480b27d67bbcea9bf43552dfc701ae5a4b2a547f3be0581ef11a3d65 +size 9718841 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..aa87a6cf7fbffe7d44b2c584386aedf648ee4994 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cc400240b23a89cc09d2fb1fa161a949094f14fdbf05164c3ccd6aa037e2b36 +size 3604473 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..67384a7ca781ed9fe1962be632d64c9886d09d41 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5c644c73e8739c768ce71f81631b679fdb2500eca30487b3f278f6fccc120b7 +size 50380 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a1388d55f364ec6bd55b13abfdab077e73fa0f61 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a97bfa48c9cb3300a4961886869801b4f9bedde6acaffceec70fdc2952d08288 +size 4402499 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ca6f9290c48e8b26c3ddbd69c809356a1a0c139d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9da9c922189ad0d878859ee648d2c706b9f0da4f7a451db5abc820effbb996ff +size 590542 diff --git a/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a5f1c917fdbed33808949938c6d0ea153721e0b0 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_linear_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e41b477a75f83d557658ea897fd3cdf98966c56f15b43d963b9e1939ff6444b9 +size 1153817 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..436798292a4b986e5cf1cf1196d20b31edd98acc --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8565c9ab9d9eb0c03cecc3b9ac3e43fba121460d6e77f4ae02c463d1c48baa7 +size 3693117 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ae143af43c3933825ec192c45bb4867e9f57170f --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fa92a0bd9e27678441c0ed59a9cbe4c46c6607456a650305e998ba7cd7c026d +size 9237031 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2b28273cbbacb21c53e72ef370087fb5f6f93257 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e55307cfb021994aa26f9b641b40372084a2956735bad656cf02bf215d227dab +size 3379295 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b01fb614835097f189361e1311ef6dc14324c9a0 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7816aae1b471ae723dfeee1b0e6b2680804199aa5b662c099228ac93449f327e +size 41473 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e0ee2e0a7f69f99708c70aa87559d96aaee46f12 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b01079d98d4b23526e35fca83f61e8975ce7048b78e415fdb17c5f9c3b4169d9 +size 4497831 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..03096031887eb538abfb60c3744717e96534a842 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a00abfd056314da46fd74fc0d2833ad627280a00dca78d5936d4893a61b87a16 +size 575773 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e1514e0fdd3bdb4361f91976f624c9bc2dd96fcc --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_1/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9abf4c9871c1898e98d0cee4e58ebf8ad68393d199cda341661194e093a2cadd +size 1074603 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8ae5c00d1d2458f69801e994c8a28f95537cdd88 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b512b5d751fc132fb25c28623557c955f46a41be05814cc06ee0e01f417ffd1a 
+size 3867843 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e10e7cb3d1eba0a36aa49c362dae00be6c1c9cd4 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:300be524cc1ea698295257ec4fd8201a64069d6fa35d0287e7162986d99feaab +size 9646794 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2f8cbd5963275e943e869c72528fd095adc7da2d --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc61be7f2f9954213e05706c450bdcb0eba50e92516574ae6ef79871c0022eca +size 3562254 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..34e7026c43ea14ba55ef2f5eb8acfc82f70a1ca0 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15e736947c4d023e4e2db1925d21fc0af4a06e027e0345d750c90749b5e97502 +size 48987 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5b1587d761f4cf5afadfc223c5153eb5e86fcf3a --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4c5b0f315408b76f29102a7b9dcd14bcb242da3e14d7be809fff2bd71b5ea8c +size 4452860 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d7c91799ba34fea0b1042cb83f4c0fee7bc14587 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:510f03d1d7ffdda58ca07dbd9eafb91063001ee9810dc458d5a51f7f96dd70ac +size 585741 diff --git 
a/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..27fc64a84566c91718d3f6f17d1f6bc76621524f --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_3/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba4cf92b6172e9bec727481cc303083abf3691893cd813df245ba79a8de769dc +size 1135894 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b9a4b422d4428754b14d9b1b69ed0f8fe4256fc6 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6589682ed8ab03d3fbc09b7d127cdc59c4caeaf5f9c23a1bc51054fa8df283ce +size 3908920 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0112d8c5fb5c654cd4f04904fe44a6af07a63880 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34a75d4fb74181ad38adafeae4120d4a1681668aeeb75f8c7503177ab16dc03e +size 9753271 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f1b493e8d12a8c59c8fe96a515e854ee8a035969 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:900b0780ae3311680a280d984f9f20d5327c5edfef80a50f9c216bb3fea1f3db +size 3607135 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1254eda1e2dd889b041efc5972c90dffcf589aee --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cd816c137addb8d59ddaeceb85f715126db72c68efdbff3ac6630dc87c37b81 +size 34347 diff --git 
a/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..91b454d57b8f2c9c29755fbd1c2ea131fdaf563f --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa44962e3122c681529bfdf8c0a4f3c0329b1bf8ee0efad84a4ca7724830981c +size 4430817 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..56ff43fa6de5abdf9441f7e396bf8deca20764f3 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2f313c080d2826a97f7e712d088f2988e5229f176ebcf7a75e5ce2c35f037be +size 586470 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6d589a413ce386733df909e32f9112178842b0a8 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_5/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da1f5208c5fa6dc8f91a1f7f38e6413e3e5f058287ce452e8ef39c4374948ffb +size 1147605 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..11ff07fd1a0de132c8ff6f89122926998ae844f9 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e895a3a63d9cc81cd0908f5db54c30622557c29eeb8245072787f98ea85e7c02 +size 3914373 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4c2bb595504fe0c0cbe70023c6a399b9aea79bfe --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53887a7421ab0813bf7b90441ad1a3230460820335667b9384460c4a131b0f78 +size 9781766 diff --git 
a/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..83014e1094c7f9c7aa07842c95104e38e6689f92 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48f8a1ed3b85de839f231ea6fb0ac4b1bfc9f715c9aeeb5358b8aeb3a5783e5d +size 3631774 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c5738ab2a1c1ab3ce5ac858ecca78e1d18166c67 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d34948f52c8d36e04450fc3bde241f421a036851292e563608eaea813a3cfab +size 34493 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c18c51dcdf99779381b595a78fc780892ebd48e3 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a76ea24d93e3c6ba6bfa4ad737e5d9a186e02dd4efd3078cb8642acc8aa50cbe +size 4429761 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b95fa6258acddeaaa3a8c566450389729205dda6 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:802a2b7cbb28b98f344d0fb7f269e0340ad37d9300a5167049a4ef7f3dc8074b +size 596868 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e4f63a14ab9206551da11004dd0c1caef3f989c1 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_7/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb46ecfd11d2907f223a0df184851d7addaf4300cf6e1bbfca318933838d89dc +size 1157311 diff --git 
a/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..575b4cc4c2afcbe2223ab3ffa8ccd7d2494aacaa --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ebf143eea0ff8897c7f40c1ba3b226154d3ac33ff81d20f71ca460651314a92 +size 3915070 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c3da5cbfde185987f4f6f836ee060178329b0aed --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e7ba503e21fa0ef1327ce50375225a2e8fa1c7503d06f6cd3035ccaaa4deecb +size 9782111 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..362c255b5f897077220b0eb907341346426ec6ca --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b0e71ff7a9a0e829dedff4c77fa97591b8356d653b560d6ae06e7f5be5c2eb3 +size 3626590 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e5ce22f5a240fe0eee0aa4b5158408b3e67a7117 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab9af20101bfa5b82aa7a807bc7900dd96ac63623def372e635bb20d8ae8cbec +size 35131 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1240001f17ac1014438fd8bdc3ad4163a6da6317 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:485b88caf8908330a641a52f0f8674916c05a38b923772f3acf32c48b4d3a7aa +size 4428779 diff --git 
a/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..195d357229d986a54f22bda26d25c55251342402 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d8b564be56448094a43ccbbc8267bb5f9fd0e9e9e51461956809ca546990a25 +size 596774 diff --git a/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1ada4d12545b139766e3d604aff1a7646ec23ba4 --- /dev/null +++ b/merge_llama/outputs/._merged_llama_llama_ties_9/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0701cd9d485a439796440130c628fdc53949484330aef4e1388b4e94241d6bb +size 1168269 diff --git a/merge_llama/results/._merged2_llama_ties_7/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged2_llama_ties_7/results_2025-06-23T10-15-33.465228.json deleted file mode 100644 index 1ef35ff9c30ef9652b52ca665992dc2186fa355d..0000000000000000000000000000000000000000 --- a/merge_llama/results/._merged2_llama_ties_7/results_2025-06-23T10-15-33.465228.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "results": { - "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.5166959578207382, - "math_pass@1:1_samples_stderr": 0.020967834394902497 - }, - "mm|arc_easy|0": { - "sem": 0.49820359281437127, - "sem_stderr": 0.017313472281651344 - }, - "mm|math_500|0": { - "math_pass@1:1_samples": 0.5106382978723404, - "math_pass@1:1_samples_stderr": 0.07370428968378202 - }, - "mm|arc_challenge|0": { - "sem": 0.45482866043613707, - "sem_stderr": 0.027836551402899586 - }, - "mm|truthfulqa|0": { - "sem": 0.34523809523809523, - "sem_stderr": 0.05218696149243466 - }, - "mm|gpqa_diamond|0": { - "sem": 0.0 - }, - "mm|commonsenseqa|0": { - "sem": 0.4134275618374558, - "sem_stderr": 0.029324862551333132 - }, - "all": { - "math_pass@1:1_samples": 0.5136671278465392, - "math_pass@1:1_samples_stderr": 0.047336062039342264, - "sem": 0.34233958206521187, - "sem_stderr": 0.031665461932079676 - } - }, - "versions": { - "mm|aime24|0": 3, - "mm|arc_challenge|0": 0, - "mm|arc_easy|0": 0, - "mm|commonsenseqa|0": 0, - "mm|gpqa_diamond|0": 2, - "mm|gsm8k|0": 0, - "mm|math_500|0": 3, - "mm|mmlu_pro|0": 0, - "mm|truthfulqa|0": 0 - }, - "size": { - "mm|gsm8k|0": 569, - "mm|arc_easy|0": 835, - "mm|math_500|0": 47, - "mm|arc_challenge|0": 321, - "mm|truthfulqa|0": 84, - "mm|gpqa_diamond|0": 1, - "mm|commonsenseqa|0": 283 - } -} \ No newline at end of file diff --git a/merge_llama/results/._merged2_llama_ties_9/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged2_llama_ties_9/results_2025-06-23T10-15-33.465228.json deleted file mode 100644 index 159a5548f47e9a47225d6351d6b85694cba58a18..0000000000000000000000000000000000000000 --- a/merge_llama/results/._merged2_llama_ties_9/results_2025-06-23T10-15-33.465228.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - 
"results": { - "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.523725834797891, - "math_pass@1:1_samples_stderr": 0.02095590124805444 - }, - "mm|arc_easy|0": { - "sem": 0.6323353293413174, - "sem_stderr": 0.016696161932346288 - }, - "mm|math_500|0": { - "math_pass@1:1_samples": 0.5319148936170213, - "math_pass@1:1_samples_stderr": 0.07357064625618348 - }, - "mm|arc_challenge|0": { - "sem": 0.573208722741433, - "sem_stderr": 0.02764962041526109 - }, - "mm|truthfulqa|0": { - "sem": 0.4523809523809524, - "sem_stderr": 0.05463266447609236 - }, - "mm|gpqa_diamond|0": { - "sem": 0.0 - }, - "mm|commonsenseqa|0": { - "sem": 0.5618374558303887, - "sem_stderr": 0.029545981057564952 - }, - "all": { - "math_pass@1:1_samples": 0.5278203642074561, - "math_pass@1:1_samples_stderr": 0.04726327375211896, - "sem": 0.4439524920588182, - "sem_stderr": 0.03213110697031617 - } - }, - "versions": { - "mm|aime24|0": 3, - "mm|arc_challenge|0": 0, - "mm|arc_easy|0": 0, - "mm|commonsenseqa|0": 0, - "mm|gpqa_diamond|0": 2, - "mm|gsm8k|0": 0, - "mm|math_500|0": 3, - "mm|mmlu_pro|0": 0, - "mm|truthfulqa|0": 0 - }, - "size": { - "mm|gsm8k|0": 569, - "mm|arc_easy|0": 835, - "mm|math_500|0": 47, - "mm|arc_challenge|0": 321, - "mm|truthfulqa|0": 84, - "mm|gpqa_diamond|0": 1, - "mm|commonsenseqa|0": 283 - } -} \ No newline at end of file diff --git a/merge_llama/results/._merged2_llama_dare_linear_1/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_dare_linear_1/results_2025-06-23T10-15-33.465228.json similarity index 100% rename from merge_llama/results/._merged2_llama_dare_linear_1/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_dare_linear_1/results_2025-06-23T10-15-33.465228.json diff --git a/merge_llama/results/._merged2_llama_dare_linear_3/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_dare_linear_3/results_2025-06-23T10-15-33.465228.json similarity index 100% rename from merge_llama/results/._merged2_llama_dare_linear_3/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_dare_linear_3/results_2025-06-23T10-15-33.465228.json diff --git a/merge_llama/results/._merged2_llama_dare_linear_5/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_dare_linear_7/results_2025-06-23T10-15-33.465228.json similarity index 100% rename from merge_llama/results/._merged2_llama_dare_linear_5/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_dare_linear_7/results_2025-06-23T10-15-33.465228.json diff --git a/merge_llama/results/._merged2_llama_dare_linear_7/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_dare_linear_9/results_2025-06-23T10-15-33.465228.json similarity index 100% rename from merge_llama/results/._merged2_llama_dare_linear_7/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_dare_linear_9/results_2025-06-23T10-15-33.465228.json diff --git a/merge_llama/results/._merged_llama_llama_linear_1/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_linear_1/results_2025-06-23T10-15-33.465228.json new file mode 100644 index 0000000000000000000000000000000000000000..090a8835af14c131553612ce1b49a9fcd1b8c947 --- /dev/null +++ b/merge_llama/results/._merged_llama_llama_linear_1/results_2025-06-23T10-15-33.465228.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.18629173989455183, 
+ "math_pass@1:1_samples_stderr": 0.016336423894838588 + }, + "mm|arc_easy|0": { + "sem": 0.41916167664670656, + "sem_stderr": 0.01708580284463805 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.02127659574468085, + "math_pass@1:1_samples_stderr": 0.02127659574468085 + }, + "mm|arc_challenge|0": { + "sem": 0.34579439252336447, + "sem_stderr": 0.0265883333568716 + }, + "mm|truthfulqa|0": { + "sem": 0.011904761904761904, + "sem_stderr": 0.011904761904761895 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.1978798586572438, + "sem_stderr": 0.023724439040445366 + }, + "all": { + "math_pass@1:1_samples": 0.10378416781961634, + "math_pass@1:1_samples_stderr": 0.018806509819759717, + "sem": 0.19494813794641533, + "sem_stderr": 0.01982583428667923 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|gsm8k|0": 569, + "mm|arc_easy|0": 835, + "mm|math_500|0": 47, + "mm|arc_challenge|0": 321, + "mm|truthfulqa|0": 84, + "mm|gpqa_diamond|0": 1, + "mm|commonsenseqa|0": 283 + } +} \ No newline at end of file diff --git a/merge_llama/results/._merged2_llama_linear_9/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_linear_3/results_2025-06-23T10-15-33.465228.json similarity index 51% rename from merge_llama/results/._merged2_llama_linear_9/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_linear_3/results_2025-06-23T10-15-33.465228.json index d085a39bd984dfc547a0b610436b111c36d36dcf..272c905b2b4d2706cf9c7a27d808ead7dfb5f2e3 100644 --- a/merge_llama/results/._merged2_llama_linear_9/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_linear_3/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.6625659050966608, - "math_pass@1:1_samples_stderr": 0.01983968877982878 + "math_pass@1:1_samples": 0.6977152899824253, + "math_pass@1:1_samples_stderr": 0.019269615216999172 }, "mm|arc_easy|0": { - "sem": 0.858682634730539, - "sem_stderr": 0.012062326528256907 + "sem": 0.7796407185628742, + "sem_stderr": 0.014352577644379201 }, "mm|math_500|0": { - "math_pass@1:1_samples": 0.5957446808510638, - "math_pass@1:1_samples_stderr": 0.07235674844413013 + "math_pass@1:1_samples": 0.6808510638297872, + "math_pass@1:1_samples_stderr": 0.0687296045180637 }, "mm|arc_challenge|0": { - "sem": 0.8006230529595015, - "sem_stderr": 0.022334515051632295 + "sem": 0.7258566978193146, + "sem_stderr": 0.024936716908140318 }, "mm|truthfulqa|0": { - "sem": 0.6309523809523809, - "sem_stderr": 0.05296639920604615 + "sem": 0.6190476190476191, + "sem_stderr": 0.05330381938880981 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.7243816254416962, - "sem_stderr": 0.02660806189914765 + "sem": 0.657243816254417, + "sem_stderr": 0.028263851951331086 }, "all": { - "math_pass@1:1_samples": 0.6291552929738623, - "math_pass@1:1_samples_stderr": 0.046098218611979457, - "sem": 0.6029279388168234, - "sem_stderr": 0.02849282567127075 + "math_pass@1:1_samples": 0.6892831769061063, + "math_pass@1:1_samples_stderr": 0.04399960986753144, + "sem": 0.556357770336845, + "sem_stderr": 0.030214241473165104 } }, "versions": { diff --git a/merge_llama/results/._merged2_llama_linear_3/results_2025-06-23T10-15-33.465228.json 
b/merge_llama/results/._merged_llama_llama_linear_5/results_2025-06-23T10-15-33.465228.json similarity index 54% rename from merge_llama/results/._merged2_llama_linear_3/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_linear_5/results_2025-06-23T10-15-33.465228.json index 324b7275d1b1b5aa213d35f3b937f9248b6f67e5..87f6a00bac1f771d21973fe404f6a08ae517d577 100644 --- a/merge_llama/results/._merged2_llama_linear_3/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_linear_5/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.859402460456942, - "math_pass@1:1_samples_stderr": 0.014585220151296579 + "math_pass@1:1_samples": 0.8137082601054482, + "math_pass@1:1_samples_stderr": 0.01633642389483859 }, "mm|arc_easy|0": { - "sem": 0.9820359281437125, - "sem_stderr": 0.0045992080408123635 + "sem": 0.9281437125748503, + "sem_stderr": 0.008942459370635843 }, "mm|math_500|0": { "math_pass@1:1_samples": 0.8936170212765957, - "math_pass@1:1_samples_stderr": 0.04546036031565446 + "math_pass@1:1_samples_stderr": 0.045460360315654445 }, "mm|arc_challenge|0": { - "sem": 0.9376947040498442, - "sem_stderr": 0.013511956053658467 + "sem": 0.8691588785046729, + "sem_stderr": 0.018851535284060073 }, "mm|truthfulqa|0": { - "sem": 0.7619047619047619, - "sem_stderr": 0.04675054225464911 + "sem": 0.6666666666666666, + "sem_stderr": 0.051743368380422164 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.8586572438162544, - "sem_stderr": 0.02074541575081619 + "sem": 0.7173144876325088, + "sem_stderr": 0.02681526065091796 }, "all": { - "math_pass@1:1_samples": 0.8765097408667688, - "math_pass@1:1_samples_stderr": 0.030022790233475517, - "sem": 0.7080585275829145, - "sem_stderr": 0.021401780524984034 + "math_pass@1:1_samples": 0.8536626406910219, + "math_pass@1:1_samples_stderr": 0.030898392105246517, + "sem": 0.6362567490757398, + "sem_stderr": 0.02658815592150901 } }, "versions": { diff --git a/merge_llama/results/._merged2_llama_ties_1/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_linear_7/results_2025-06-23T10-15-33.465228.json similarity index 53% rename from merge_llama/results/._merged2_llama_ties_1/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_linear_7/results_2025-06-23T10-15-33.465228.json index ee84c2224745e0d3a5c6d2cb22ee015c85445f3b..4ed5ac68ce608d46ed4d28f6b32c186ff761549e 100644 --- a/merge_llama/results/._merged2_llama_ties_1/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_linear_7/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.7996485061511424, - "math_pass@1:1_samples_stderr": 0.016794678313392368 + "math_pass@1:1_samples": 0.7609841827768014, + "math_pass@1:1_samples_stderr": 0.017894804912395246 }, "mm|arc_easy|0": { - "sem": 0.9676646706586827, - "sem_stderr": 0.006125168799876929 + "sem": 0.948502994011976, + "sem_stderr": 0.007652922543446236 }, "mm|math_500|0": { - "math_pass@1:1_samples": 0.7021276595744681, - "math_pass@1:1_samples_stderr": 0.06742861107915606 + "math_pass@1:1_samples": 0.8936170212765957, + "math_pass@1:1_samples_stderr": 0.045460360315654445 }, "mm|arc_challenge|0": { "sem": 0.9190031152647975, - "sem_stderr": 0.015251679149194798 + "sem_stderr": 0.015251679149194806 }, "mm|truthfulqa|0": { - "sem": 0.7142857142857143, - 
"sem_stderr": 0.04958643817861591 + "sem": 0.7380952380952381, + "sem_stderr": 0.04826017061124183 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.8197879858657244, - "sem_stderr": 0.022888539987329246 + "sem": 0.823321554770318, + "sem_stderr": 0.022711821852224343 }, "all": { - "math_pass@1:1_samples": 0.7508880828628053, - "math_pass@1:1_samples_stderr": 0.04211164469627421, - "sem": 0.6841482972149838, - "sem_stderr": 0.02346295652875422 + "math_pass@1:1_samples": 0.8273006020266985, + "math_pass@1:1_samples_stderr": 0.03167758261402485, + "sem": 0.6857845804284659, + "sem_stderr": 0.023469148539026805 } }, "versions": { diff --git a/merge_llama/results/._merged_llama_llama_linear_9/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_linear_9/results_2025-06-23T10-15-33.465228.json new file mode 100644 index 0000000000000000000000000000000000000000..27045c7687006e8a1912d6f2ddeec4dddba7d7e2 --- /dev/null +++ b/merge_llama/results/._merged_llama_llama_linear_9/results_2025-06-23T10-15-33.465228.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7680140597539543, + "math_pass@1:1_samples_stderr": 0.017710925239577572 + }, + "mm|arc_easy|0": { + "sem": 0.9544910179640719, + "sem_stderr": 0.0072169128055993076 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.9574468085106383, + "math_pass@1:1_samples_stderr": 0.029760791752350458 + }, + "mm|arc_challenge|0": { + "sem": 0.9096573208722741, + "sem_stderr": 0.016025456258691055 + }, + "mm|truthfulqa|0": { + "sem": 0.7857142857142857, + "sem_stderr": 0.045039119132976015 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.8162544169611308, + "sem_stderr": 0.023061984296456013 + }, + "all": { + "math_pass@1:1_samples": 0.8627304341322963, + "math_pass@1:1_samples_stderr": 0.023735858495964015, + "sem": 0.6932234083023525, + "sem_stderr": 0.022835868123430594 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|gsm8k|0": 569, + "mm|arc_easy|0": 835, + "mm|math_500|0": 47, + "mm|arc_challenge|0": 321, + "mm|truthfulqa|0": 84, + "mm|gpqa_diamond|0": 1, + "mm|commonsenseqa|0": 283 + } +} \ No newline at end of file diff --git a/merge_llama/results/._merged2_llama_linear_5/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_ties_1/results_2025-06-23T10-15-33.465228.json similarity index 59% rename from merge_llama/results/._merged2_llama_linear_5/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_ties_1/results_2025-06-23T10-15-33.465228.json index 4de7c6a49b869e6e6492574b237625faee37f90e..ccc3d176bea5acdb070dea584fe7bd178a0574d9 100644 --- a/merge_llama/results/._merged2_llama_linear_5/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_ties_1/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.680140597539543, - "math_pass@1:1_samples_stderr": 0.019570627113877893 + "math_pass@1:1_samples": 0.6748681898066784, + "math_pass@1:1_samples_stderr": 0.01965463767234652 }, "mm|arc_easy|0": { - "sem": 0.9748502994011976, - "sem_stderr": 0.005421910558007435 + "sem": 0.7161676646706587, + "sem_stderr": 0.01561188214821106 }, "mm|math_500|0": { 
"math_pass@1:1_samples": 0.7659574468085106, "math_pass@1:1_samples_stderr": 0.06242676343682882 }, "mm|arc_challenge|0": { - "sem": 0.9345794392523364, - "sem_stderr": 0.013822616315653345 + "sem": 0.616822429906542, + "sem_stderr": 0.02717722621232775 }, "mm|truthfulqa|0": { - "sem": 0.75, - "sem_stderr": 0.04752931878933585 + "sem": 0.4166666666666667, + "sem_stderr": 0.054114509952658096 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.8515901060070671, - "sem_stderr": 0.021170064285132975 + "sem": 0.5335689045936396, + "sem_stderr": 0.029707386707350843 }, "all": { - "math_pass@1:1_samples": 0.7230490221740269, - "math_pass@1:1_samples_stderr": 0.04099869527535335, - "sem": 0.7022039689321202, - "sem_stderr": 0.0219859774870324 + "math_pass@1:1_samples": 0.7204128183075945, + "math_pass@1:1_samples_stderr": 0.04104070055458767, + "sem": 0.45664513316750144, + "sem_stderr": 0.03165275125513694 } }, "versions": { diff --git a/merge_llama/results/._merged2_llama_linear_1/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_ties_3/results_2025-06-23T10-15-33.465228.json similarity index 51% rename from merge_llama/results/._merged2_llama_linear_1/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_ties_3/results_2025-06-23T10-15-33.465228.json index 9af469d4728b0d1706e38ff19000166e6b9969a3..7e8721302c767a14d032679fab138dc248ce46af 100644 --- a/merge_llama/results/._merged2_llama_linear_1/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_ties_3/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.8699472759226714, - "math_pass@1:1_samples_stderr": 0.014113412175475131 + "math_pass@1:1_samples": 0.70298769771529, + "math_pass@1:1_samples_stderr": 0.019172860558623793 }, "mm|arc_easy|0": { - "sem": 0.9736526946107784, - "sem_stderr": 0.005546092548046579 + "sem": 0.9461077844311377, + "sem_stderr": 0.007818983591261023 }, "mm|math_500|0": { - "math_pass@1:1_samples": 0.8723404255319149, - "math_pass@1:1_samples_stderr": 0.04920290896196927 + "math_pass@1:1_samples": 0.7872340425531915, + "math_pass@1:1_samples_stderr": 0.060342609647735204 }, "mm|arc_challenge|0": { - "sem": 0.9501557632398754, - "sem_stderr": 0.012165497937549305 + "sem": 0.8722741433021807, + "sem_stderr": 0.01865910989207314 }, "mm|truthfulqa|0": { - "sem": 0.7380952380952381, - "sem_stderr": 0.04826017061124184 + "sem": 0.6547619047619048, + "sem_stderr": 0.05218696149243465 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.8798586572438163, - "sem_stderr": 0.01936101593417243 + "sem": 0.8127208480565371, + "sem_stderr": 0.023232228103701378 }, "all": { - "math_pass@1:1_samples": 0.8711438507272932, - "math_pass@1:1_samples_stderr": 0.0316581605687222, - "sem": 0.7083524706379416, - "sem_stderr": 0.021333194257752538 + "math_pass@1:1_samples": 0.7451108701342408, + "math_pass@1:1_samples_stderr": 0.039757735103179495, + "sem": 0.6571729361103521, + "sem_stderr": 0.025474320769867546 } }, "versions": { diff --git a/merge_llama/results/._merged2_llama_ties_3/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_ties_5/results_2025-06-23T10-15-33.465228.json similarity index 54% rename from merge_llama/results/._merged2_llama_ties_3/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_ties_5/results_2025-06-23T10-15-33.465228.json index 
c2a9c058e9095c5d440f9299ba1b66cebbb4125d..f6c0ecfc01ef01ade52c6dc80de9215003a83f1d 100644 --- a/merge_llama/results/._merged2_llama_ties_3/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_ties_5/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.8822495606326889, - "math_pass@1:1_samples_stderr": 0.01352392724365799 + "math_pass@1:1_samples": 0.718804920913884, + "math_pass@1:1_samples_stderr": 0.018864061823815267 }, "mm|arc_easy|0": { - "sem": 0.9676646706586827, - "sem_stderr": 0.0061251687998769505 + "sem": 0.9425149700598803, + "sem_stderr": 0.008060063884587028 }, "mm|math_500|0": { - "math_pass@1:1_samples": 0.851063829787234, - "math_pass@1:1_samples_stderr": 0.052493102531400944 + "math_pass@1:1_samples": 0.8936170212765957, + "math_pass@1:1_samples_stderr": 0.045460360315654445 }, "mm|arc_challenge|0": { - "sem": 0.9470404984423676, - "sem_stderr": 0.012519334141688527 + "sem": 0.9096573208722741, + "sem_stderr": 0.016025456258691062 }, "mm|truthfulqa|0": { - "sem": 0.7261904761904762, - "sem_stderr": 0.048945244607084167 + "sem": 0.7023809523809523, + "sem_stderr": 0.05018543326712822 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.8303886925795053, - "sem_stderr": 0.022348252015324283 + "sem": 0.8197879858657244, + "sem_stderr": 0.02288853998732925 }, "all": { - "math_pass@1:1_samples": 0.8666566952099615, - "math_pass@1:1_samples_stderr": 0.03300851488752947, - "sem": 0.6942568675742063, - "sem_stderr": 0.022484499890993482 + "math_pass@1:1_samples": 0.8062109710952399, + "math_pass@1:1_samples_stderr": 0.03216221106973485, + "sem": 0.6748682458357662, + "sem_stderr": 0.024289873349433888 } }, "versions": { diff --git a/merge_llama/results/._merged2_llama_ties_5/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_ties_7/results_2025-06-23T10-15-33.465228.json similarity index 51% rename from merge_llama/results/._merged2_llama_ties_5/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_ties_7/results_2025-06-23T10-15-33.465228.json index 473891c53c305bc561747b22c92430eeca56b255..b02fc1fc8b4e167b20c5744e06b867cf37218591 100644 --- a/merge_llama/results/._merged2_llama_ties_5/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_ties_7/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.8822495606326889, - "math_pass@1:1_samples_stderr": 0.013523927243657967 + "math_pass@1:1_samples": 0.7521968365553603, + "math_pass@1:1_samples_stderr": 0.018115278142167127 }, "mm|arc_easy|0": { - "sem": 0.9640718562874252, - "sem_stderr": 0.006444497606703675 + "sem": 0.9341317365269461, + "sem_stderr": 0.008589327166616617 }, "mm|math_500|0": { - "math_pass@1:1_samples": 1.0, - "math_pass@1:1_samples_stderr": 0.0 + "math_pass@1:1_samples": 0.9787234042553191, + "math_pass@1:1_samples_stderr": 0.02127659574468085 }, "mm|arc_challenge|0": { - "sem": 0.9345794392523364, - "sem_stderr": 0.013822616315653331 + "sem": 0.9003115264797508, + "sem_stderr": 0.01674726486693534 }, "mm|truthfulqa|0": { - "sem": 0.6785714285714286, - "sem_stderr": 0.051262641097241254 + "sem": 0.75, + "sem_stderr": 0.04752931878933585 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.8056537102473498, - "sem_stderr": 0.02356339039080162 + "sem": 0.8127208480565371, + "sem_stderr": 0.023232228103701378 
}, "all": { - "math_pass@1:1_samples": 0.9411247803163445, - "math_pass@1:1_samples_stderr": 0.0067619636218289834, - "sem": 0.6765752868717081, - "sem_stderr": 0.02377328635259997 + "math_pass@1:1_samples": 0.8654601204053397, + "math_pass@1:1_samples_stderr": 0.01969593694342399, + "sem": 0.6794328222126468, + "sem_stderr": 0.024024534731647295 } }, "versions": { diff --git a/merge_llama/results/._merged2_llama_linear_7/results_2025-06-23T10-15-33.465228.json b/merge_llama/results/._merged_llama_llama_ties_9/results_2025-06-23T10-15-33.465228.json similarity index 54% rename from merge_llama/results/._merged2_llama_linear_7/results_2025-06-23T10-15-33.465228.json rename to merge_llama/results/._merged_llama_llama_ties_9/results_2025-06-23T10-15-33.465228.json index df930fecb4b0213e5cc0b747f24dff1f49941fd9..69c9eb15474b51c1ad8d294b3365b27f8d882b5e 100644 --- a/merge_llama/results/._merged2_llama_linear_7/results_2025-06-23T10-15-33.465228.json +++ b/merge_llama/results/._merged_llama_llama_ties_9/results_2025-06-23T10-15-33.465228.json @@ -1,37 +1,37 @@ { "results": { "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.8014059753954306, - "math_pass@1:1_samples_stderr": 0.016739219675508878 + "math_pass@1:1_samples": 0.7311072056239016, + "math_pass@1:1_samples_stderr": 0.018603983534727322 }, "mm|arc_easy|0": { - "sem": 0.9652694610778443, - "sem_stderr": 0.006340113301563085 + "sem": 0.9461077844311377, + "sem_stderr": 0.00781898359126103 }, "mm|math_500|0": { "math_pass@1:1_samples": 0.9361702127659575, - "math_pass@1:1_samples_stderr": 0.03604210867489021 + "math_pass@1:1_samples_stderr": 0.0360421086748902 }, "mm|arc_challenge|0": { - "sem": 0.9065420560747663, - "sem_stderr": 0.01627148163222446 + "sem": 0.8878504672897196, + "sem_stderr": 0.017639800082408303 }, "mm|truthfulqa|0": { - "sem": 0.7023809523809523, - "sem_stderr": 0.05018543326712823 + "sem": 0.7261904761904762, + "sem_stderr": 0.04894524460708416 }, "mm|gpqa_diamond|0": { "sem": 0.0 }, "mm|commonsenseqa|0": { - "sem": 0.7879858657243817, - "sem_stderr": 0.024339803458982758 + "sem": 0.8091872791519434, + "sem_stderr": 0.023399341265703564 }, "all": { - "math_pass@1:1_samples": 0.868788094080694, - "math_pass@1:1_samples_stderr": 0.026390664175199544, - "sem": 0.672435667051589, - "sem_stderr": 0.024284207914974633 + "math_pass@1:1_samples": 0.8336387091949296, + "math_pass@1:1_samples_stderr": 0.027323046104808763, + "sem": 0.6738672014126554, + "sem_stderr": 0.024450842386614262 } }, "versions": { diff --git a/merge_qwen/logs/qwen_dare_linear_1.log b/merge_qwen/logs/qwen_dare_linear_1.log new file mode 100644 index 0000000000000000000000000000000000000000..3224e892bb1a97e4eef7e731f9fcc530b4d18862 --- /dev/null +++ b/merge_qwen/logs/qwen_dare_linear_1.log @@ -0,0 +1,96 @@ +INFO 07-09 12:40:23 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 12:40:32 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'classify', 'generate', 'embed'}. Defaulting to 'generate'. +INFO 07-09 12:40:32 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 12:40:32 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 12:40:33 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_dare_linear_1', speculative_config=None, tokenizer='./merged_qwen/qwen_dare_linear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_dare_linear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 12:40:33 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 12:40:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e18568ba'), local_subscribe_addr='ipc:///tmp/4c08e1b8-23c6-4638-aa66-d988b5c57b4e', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 12:40:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 12:40:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_84700e6e'), local_subscribe_addr='ipc:///tmp/a1b33f74-0c8c-4985-827d-b222704a04c1', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_36d716d8'), local_subscribe_addr='ipc:///tmp/5be46ad1-d88e-4645-84e9-f975815cd20e', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 12:40:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 12:40:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b149bcc3'), local_subscribe_addr='ipc:///tmp/82b83e59-45f9-459e-bab3-ddc5f6a6706d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c9288e3c'), local_subscribe_addr='ipc:///tmp/d5b12ec0-248e-43b4-9fb7-b62f98abe9ec', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:35 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:35 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=726323) WARNING 07-09 12:40:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=726322) WARNING 07-09 12:40:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=726320) WARNING 07-09 12:40:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=726321) WARNING 07-09 12:40:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_ad16e6e4'), local_subscribe_addr='ipc:///tmp/d78b4d8b-9d88-4b91-abe5-dac972bb32cd', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:36 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:36 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:36 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:36 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=726322) WARNING 07-09 12:40:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=726323) WARNING 07-09 12:40:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=726320) WARNING 07-09 12:40:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:36 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=726321) WARNING 07-09 12:40:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:36 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_1...
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:36 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_1...
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:36 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_1...
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:36 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_1...
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:47 [loader.py:458] Loading weights took 10.28 seconds
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:47 [loader.py:458] Loading weights took 10.31 seconds
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:47 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 10.543841 seconds
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:47 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 10.567992 seconds
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:47 [loader.py:458] Loading weights took 10.78 seconds
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:47 [loader.py:458] Loading weights took 10.79 seconds
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:48 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 11.066852 seconds
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:48 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 11.111445 seconds
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:57 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f3313a81ed/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:40:57 [backends.py:430] Dynamo bytecode transform time: 9.73 s
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f3313a81ed/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:40:58 [backends.py:430] Dynamo bytecode transform time: 9.79 s
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f3313a81ed/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:40:58 [backends.py:430] Dynamo bytecode transform time: 9.93 s
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/f3313a81ed/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:40:58 [backends.py:430] Dynamo bytecode transform time: 9.99 s
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:41:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.901 s
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:41:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.865 s
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:41:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.841 s
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:41:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.956 s
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:41:15 [monitor.py:33] torch.compile takes 9.73 s in total
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:41:15 [monitor.py:33] torch.compile takes 9.93 s in total
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:41:15 [monitor.py:33] torch.compile takes 9.99 s in total
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:41:15 [monitor.py:33] torch.compile takes 9.79 s in total
+INFO 07-09 12:41:16 [kv_cache_utils.py:634] GPU KV cache size: 1,262,080 tokens
+INFO 07-09 12:41:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.25x
+INFO 07-09 12:41:16 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens
+INFO 07-09 12:41:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x
+INFO 07-09 12:41:16 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens
+INFO 07-09 12:41:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x
+INFO 07-09 12:41:16 [kv_cache_utils.py:634] GPU KV cache size: 1,262,752 tokens
+INFO 07-09 12:41:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.58x
+(VllmWorker rank=3 pid=726323) INFO 07-09 12:41:47 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB
+(VllmWorker rank=2 pid=726322) INFO 07-09 12:41:47 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB
+(VllmWorker rank=1 pid=726321) INFO 07-09 12:41:47 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB
+(VllmWorker rank=0 pid=726320) INFO 07-09 12:41:47 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB
+INFO 07-09 12:41:48 [core.py:159] init engine (profile, create kv cache, warmup model) took 59.84 seconds
+INFO 07-09 12:41:48 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 13:02:30 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 13:02:30 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value| |Stderr|
+|------------------|------:|---------------------|----:|---|-----:|
+|all | |sem | 0|± | 0|
+| | |math_pass@1:1_samples| 0|± | 0|
+|mm\|arc_challenge\|0| 0|sem | 0|± | 0|
+|mm\|arc_easy\|0 | 0|sem | 0|± | 0|
+|mm\|commonsenseqa\|0| 0|sem | 0|± | 0|
+|mm\|gpqa_diamond\|0 | 2|sem | 0| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples| 0|± | 0|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples| 0|± | 0|
+|mm\|truthfulqa\|0 | 0|sem | 0|± | 0|
+
diff --git a/merge_qwen/logs/qwen_dare_linear_3.log b/merge_qwen/logs/qwen_dare_linear_3.log
new file mode 100644
index 0000000000000000000000000000000000000000..fffc197fcf6e5d2b8830e7dff625585df39f1db5
--- /dev/null
+++ b/merge_qwen/logs/qwen_dare_linear_3.log
@@ -0,0 +1,100 @@
+INFO 07-09 13:02:29 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 13:02:39 [config.py:717] This model supports multiple tasks: {'reward', 'generate', 'embed', 'classify', 'score'}. Defaulting to 'generate'.
+INFO 07-09 13:02:39 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 13:02:39 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 13:02:40 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_dare_linear_3', speculative_config=None, tokenizer='./merged_qwen/qwen_dare_linear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_dare_linear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 13:02:40 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 13:02:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_23484c64'), local_subscribe_addr='ipc:///tmp/c405caa1-9faa-4c3a-924f-83ce0b2c7f1b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 13:02:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:02:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c296096e'), local_subscribe_addr='ipc:///tmp/29a8be74-b254-42df-b94e-009c15e43769', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 13:02:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:02:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f5da5060'), local_subscribe_addr='ipc:///tmp/6c5c81e1-ed72-4719-8930-8f365356cc8a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 13:02:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 13:02:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:02:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a7c068d2'), local_subscribe_addr='ipc:///tmp/2e1fa81b-177f-41a5-82b8-bc9f591418e7', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:02:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e8b4e25a'), local_subscribe_addr='ipc:///tmp/da1281c8-078e-438c-94e5-28c212c1a96b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:02:42 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:02:42 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:02:42 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:02:42 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:02:42 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:02:42 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:02:42 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:02:42 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=738480) WARNING 07-09 13:02:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=738479) WARNING 07-09 13:02:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=738477) WARNING 07-09 13:02:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=738478) WARNING 07-09 13:02:43 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:02:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_1b718ac7'), local_subscribe_addr='ipc:///tmp/a57e1000-3e16-40f6-b2e6-b36f12db7f57', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:02:43 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:02:43 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:02:43 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:02:43 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:02:43 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:02:43 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=738479) WARNING 07-09 13:02:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:02:43 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:02:43 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=738480) WARNING 07-09 13:02:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=738477) WARNING 07-09 13:02:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=738478) WARNING 07-09 13:02:43 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:02:43 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_3...
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:02:43 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_3...
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:02:43 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_3...
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:02:43 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_3...
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:03:00 [loader.py:458] Loading weights took 17.22 seconds
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:03:00 [loader.py:458] Loading weights took 17.32 seconds
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:03:00 [loader.py:458] Loading weights took 17.31 seconds
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:03:00 [loader.py:458] Loading weights took 17.29 seconds
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:03:00 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 17.498639 seconds
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:03:00 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 17.548393 seconds
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:03:00 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 17.548004 seconds
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:03:01 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 17.561457 seconds
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:03:10 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5f8caf6c2c/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:03:10 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5f8caf6c2c/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:03:10 [backends.py:430] Dynamo bytecode transform time: 9.72 s
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:03:10 [backends.py:430] Dynamo bytecode transform time: 9.72 s
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:03:10 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5f8caf6c2c/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:03:10 [backends.py:430] Dynamo bytecode transform time: 9.75 s
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:03:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5f8caf6c2c/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:03:11 [backends.py:430] Dynamo bytecode transform time: 10.04 s
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:03:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:03:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:03:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:03:14 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:03:47 [backends.py:148] Compiling a graph for general shape takes 35.73 s
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:03:47 [backends.py:148] Compiling a graph for general shape takes 35.87 s
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:03:47 [backends.py:148] Compiling a graph for general shape takes 35.87 s
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:03:48 [backends.py:148] Compiling a graph for general shape takes 36.34 s
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:04:13 [monitor.py:33] torch.compile takes 45.45 s in total
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:04:13 [monitor.py:33] torch.compile takes 45.59 s in total
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:04:13 [monitor.py:33] torch.compile takes 45.61 s in total
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:04:13 [monitor.py:33] torch.compile takes 46.38 s in total
+INFO 07-09 13:04:15 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 13:04:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x
+INFO 07-09 13:04:15 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 13:04:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 13:04:15 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 13:04:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 13:04:15 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens
+INFO 07-09 13:04:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x
+(VllmWorker rank=2 pid=738479) INFO 07-09 13:04:50 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+(VllmWorker rank=1 pid=738478) INFO 07-09 13:04:50 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+(VllmWorker rank=3 pid=738480) INFO 07-09 13:04:50 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+(VllmWorker rank=0 pid=738477) INFO 07-09 13:04:50 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+INFO 07-09 13:04:51 [core.py:159] init engine (profile, create kv cache, warmup model) took 109.91 seconds
+INFO 07-09 13:04:51 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 13:25:37 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 13:25:37 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value| |Stderr|
+|------------------|------:|---------------------|----:|---|-----:|
+|all | |sem | 0|± | 0|
+| | |math_pass@1:1_samples| 0|± | 0|
+|mm\|arc_challenge\|0| 0|sem | 0|± | 0|
+|mm\|arc_easy\|0 | 0|sem | 0|± | 0|
+|mm\|commonsenseqa\|0| 0|sem | 0|± | 0|
+|mm\|gpqa_diamond\|0 | 2|sem | 0| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples| 0|± | 0|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples| 0|± | 0|
+|mm\|truthfulqa\|0 | 0|sem | 0|± | 0|
+
diff --git a/merge_qwen/logs/qwen_dare_linear_5.log b/merge_qwen/logs/qwen_dare_linear_5.log
new file mode 100644
index 0000000000000000000000000000000000000000..a0939f0201d09400e377f9e6513ccc0806d910cb
--- /dev/null
+++ b/merge_qwen/logs/qwen_dare_linear_5.log
@@ -0,0 +1,100 @@
+INFO 07-09 13:25:36 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 13:25:44 [config.py:717] This model supports multiple tasks: {'generate', 'embed', 'classify', 'reward', 'score'}. Defaulting to 'generate'.
+INFO 07-09 13:25:45 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 13:25:45 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 13:25:46 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_dare_linear_5', speculative_config=None, tokenizer='./merged_qwen/qwen_dare_linear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_dare_linear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 13:25:46 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 13:25:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_58bf6ecc'), local_subscribe_addr='ipc:///tmp/fa0f8ac6-3aeb-4743-9683-9b2e9e0b676f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 13:25:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:25:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b6c1becf'), local_subscribe_addr='ipc:///tmp/cd5f611a-b8c1-4ce0-99be-bcf86b79b833', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 13:25:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:25:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_edfde738'), local_subscribe_addr='ipc:///tmp/8a80d596-2afe-4d6a-b5a8-2f325e88b2e6', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 13:25:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 13:25:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:25:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b689839b'), local_subscribe_addr='ipc:///tmp/25919854-9060-41ba-a869-025f641054e4', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:25:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9e35228c'), local_subscribe_addr='ipc:///tmp/3a8b913b-7b4c-484e-a035-22bafdfeb255', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:25:48 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:25:48 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:25:48 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:25:48 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:25:48 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:25:48 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:25:48 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:25:48 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=748443) WARNING 07-09 13:25:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=748444) WARNING 07-09 13:25:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=748445) WARNING 07-09 13:25:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=748442) WARNING 07-09 13:25:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:25:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_2b97d933'), local_subscribe_addr='ipc:///tmp/2e3312bb-5350-48ba-b8ba-0f88bb2b08b4', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:25:49 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:25:49 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:25:49 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:25:49 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:25:49 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:25:49 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:25:49 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=748443) WARNING 07-09 13:25:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=748444) WARNING 07-09 13:25:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=748445) WARNING 07-09 13:25:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:25:49 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=748442) WARNING 07-09 13:25:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:25:49 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_5...
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:25:49 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_5...
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:25:49 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_5...
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:25:49 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_5...
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:26:16 [loader.py:458] Loading weights took 27.28 seconds
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:26:16 [loader.py:458] Loading weights took 27.52 seconds
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:26:16 [loader.py:458] Loading weights took 27.51 seconds
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:26:16 [loader.py:458] Loading weights took 27.49 seconds
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:26:16 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.532010 seconds
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:26:17 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.769541 seconds
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:26:17 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.767690 seconds
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:26:17 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.750476 seconds
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:26:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/18362f057b/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:26:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/18362f057b/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:26:26 [backends.py:430] Dynamo bytecode transform time: 9.51 s
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:26:26 [backends.py:430] Dynamo bytecode transform time: 9.51 s
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:26:26 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/18362f057b/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:26:26 [backends.py:430] Dynamo bytecode transform time: 9.57 s
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:26:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/18362f057b/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:26:27 [backends.py:430] Dynamo bytecode transform time: 9.88 s
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:26:29 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:26:29 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:26:29 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:26:30 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:27:02 [backends.py:148] Compiling a graph for general shape takes 35.50 s
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:27:02 [backends.py:148] Compiling a graph for general shape takes 35.70 s
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:27:03 [backends.py:148] Compiling a graph for general shape takes 36.45 s
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:27:03 [backends.py:148] Compiling a graph for general shape takes 36.21 s
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:27:29 [monitor.py:33] torch.compile takes 45.21 s in total
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:27:29 [monitor.py:33] torch.compile takes 46.09 s in total
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:27:29 [monitor.py:33] torch.compile takes 46.02 s in total
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:27:29 [monitor.py:33] torch.compile takes 45.01 s in total
+INFO 07-09 13:27:30 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 13:27:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x
+INFO 07-09 13:27:30 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 13:27:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 13:27:30 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 13:27:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 13:27:30 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens
+INFO 07-09 13:27:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x
+(VllmWorker rank=2 pid=748444) INFO 07-09 13:28:06 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.55 GiB
+(VllmWorker rank=0 pid=748442) INFO 07-09 13:28:06 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.55 GiB
+(VllmWorker rank=3 pid=748445) INFO 07-09 13:28:06 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.55 GiB
+(VllmWorker rank=1 pid=748443) INFO 07-09 13:28:06 [gpu_model_runner.py:1686] Graph capturing finished in 35 secs, took 3.55 GiB
+INFO 07-09 13:28:06 [core.py:159] init engine (profile, create kv cache, warmup model) took 109.12 seconds
+INFO 07-09 13:28:06 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 13:48:47 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 13:48:47 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value| |Stderr|
+|------------------|------:|---------------------|----:|---|-----:|
+|all | |sem | 0|± | 0|
+| | |math_pass@1:1_samples| 0|± | 0|
+|mm\|arc_challenge\|0| 0|sem | 0|± | 0|
+|mm\|arc_easy\|0 | 0|sem | 0|± | 0|
+|mm\|commonsenseqa\|0| 0|sem | 0|± | 0|
+|mm\|gpqa_diamond\|0 | 2|sem | 0| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples| 0|± | 0|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples| 0|± | 0|
+|mm\|truthfulqa\|0 | 0|sem | 0|± | 0|
+
diff --git a/merge_qwen/logs/qwen_dare_linear_7.log b/merge_qwen/logs/qwen_dare_linear_7.log
new file mode 100644
index 0000000000000000000000000000000000000000..a4e757a1defdd3b93e86b399a435ccdce0f1f707
--- /dev/null
+++ b/merge_qwen/logs/qwen_dare_linear_7.log
@@ -0,0 +1,100 @@
+INFO 07-09 13:48:46 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 13:48:55 [config.py:717] This model supports multiple tasks: {'embed', 'generate', 'classify', 'reward', 'score'}. Defaulting to 'generate'.
+INFO 07-09 13:48:55 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 13:48:55 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 13:48:57 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_dare_linear_7', speculative_config=None, tokenizer='./merged_qwen/qwen_dare_linear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_dare_linear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 13:48:57 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 13:48:57 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e6147c7a'), local_subscribe_addr='ipc:///tmp/4a03c737-f934-4d56-b092-3f3f2aa7516e', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 13:48:57 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=757642) INFO 07-09 13:48:57 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_dafb842b'), local_subscribe_addr='ipc:///tmp/17ab7204-cad3-4d6b-b2c1-79780c59f077', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 13:48:57 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=757641) INFO 07-09 13:48:57 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5222c4d9'), local_subscribe_addr='ipc:///tmp/00741352-3d5d-4eaa-9322-770fb94cb153', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 13:48:57 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 13:48:57 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=757643) INFO 07-09 13:48:57 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1a2d2879'), local_subscribe_addr='ipc:///tmp/997706a1-8e27-4278-aaac-d2e36adc3d50', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=757644) INFO 07-09 13:48:57 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_426010d1'), local_subscribe_addr='ipc:///tmp/7c81da98-3548-4da8-91f3-4b4439fd8e1b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=757641) INFO 07-09 13:48:58 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=757642) INFO 07-09 13:48:58 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=757643) INFO 07-09 13:48:58 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=757641) INFO 07-09 13:48:58 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=757642) INFO 07-09 13:48:58 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=757643) INFO 07-09 13:48:58 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=757644) INFO 07-09 13:48:58 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=757644) INFO 07-09 13:48:58 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=757644) WARNING 07-09 13:48:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=757643) WARNING 07-09 13:48:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=757642) WARNING 07-09 13:48:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=757641) WARNING 07-09 13:48:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=757641) INFO 07-09 13:48:59 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_463ff58d'), local_subscribe_addr='ipc:///tmp/df8e06b3-fce6-4b51-9ca2-5c194c6c3202', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=757644) INFO 07-09 13:48:59 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=757643) INFO 07-09 13:48:59 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=757642) INFO 07-09 13:48:59 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=757641) INFO 07-09 13:48:59 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=757644) INFO 07-09 13:48:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=757643) INFO 07-09 13:48:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=757644) WARNING 07-09 13:48:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=757643) WARNING 07-09 13:48:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=757642) INFO 07-09 13:48:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=757642) WARNING 07-09 13:48:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=757641) INFO 07-09 13:48:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=757641) WARNING 07-09 13:48:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=757644) INFO 07-09 13:48:59 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_7... +(VllmWorker rank=2 pid=757643) INFO 07-09 13:48:59 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_7... +(VllmWorker rank=1 pid=757642) INFO 07-09 13:48:59 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_7... +(VllmWorker rank=0 pid=757641) INFO 07-09 13:48:59 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_7... 
+(VllmWorker rank=3 pid=757644) INFO 07-09 13:49:26 [loader.py:458] Loading weights took 27.27 seconds +(VllmWorker rank=3 pid=757644) INFO 07-09 13:49:27 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.505146 seconds +(VllmWorker rank=2 pid=757643) INFO 07-09 13:49:27 [loader.py:458] Loading weights took 27.53 seconds +(VllmWorker rank=1 pid=757642) INFO 07-09 13:49:27 [loader.py:458] Loading weights took 27.49 seconds +(VllmWorker rank=0 pid=757641) INFO 07-09 13:49:27 [loader.py:458] Loading weights took 27.50 seconds +(VllmWorker rank=2 pid=757643) INFO 07-09 13:49:27 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.773954 seconds +(VllmWorker rank=1 pid=757642) INFO 07-09 13:49:27 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.773647 seconds +(VllmWorker rank=0 pid=757641) INFO 07-09 13:49:27 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 27.762227 seconds +(VllmWorker rank=2 pid=757643) INFO 07-09 13:49:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/52efc80eb4/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=757644) INFO 07-09 13:49:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/52efc80eb4/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=757642) INFO 07-09 13:49:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/52efc80eb4/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=757643) INFO 07-09 13:49:37 [backends.py:430] Dynamo bytecode transform time: 9.84 s +(VllmWorker rank=3 pid=757644) INFO 07-09 13:49:37 [backends.py:430] Dynamo bytecode transform time: 9.84 s +(VllmWorker rank=1 pid=757642) INFO 07-09 13:49:37 [backends.py:430] Dynamo bytecode transform time: 9.84 s +(VllmWorker rank=0 pid=757641) INFO 07-09 13:49:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/52efc80eb4/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=757641) INFO 07-09 13:49:37 [backends.py:430] Dynamo bytecode transform time: 9.97 s +(VllmWorker rank=3 pid=757644) INFO 07-09 13:49:40 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=757642) INFO 07-09 13:49:40 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=757643) INFO 07-09 13:49:40 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=757641) INFO 07-09 13:49:40 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=757643) INFO 07-09 13:50:14 [backends.py:148] Compiling a graph for general shape takes 36.28 s +(VllmWorker rank=1 pid=757642) INFO 07-09 13:50:14 [backends.py:148] Compiling a graph for general shape takes 36.33 s +(VllmWorker rank=3 pid=757644) INFO 07-09 13:50:14 [backends.py:148] Compiling a graph for general shape takes 36.44 s +(VllmWorker rank=0 pid=757641) INFO 07-09 13:50:15 [backends.py:148] Compiling a graph for general shape takes 37.66 s +(VllmWorker rank=2 pid=757643) INFO 07-09 13:50:41 [monitor.py:33] torch.compile takes 46.12 s in total +(VllmWorker rank=1 pid=757642) INFO 07-09 13:50:41 [monitor.py:33] torch.compile takes 46.17 s in total +(VllmWorker rank=3 pid=757644) INFO 07-09 13:50:41 [monitor.py:33] torch.compile takes 46.28 s in total +(VllmWorker rank=0 pid=757641) INFO 07-09 13:50:41 [monitor.py:33] torch.compile takes 47.63 s in total +INFO 07-09 13:50:42 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 13:50:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x +INFO 07-09 13:50:42 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 13:50:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 13:50:42 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 13:50:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 13:50:42 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens +INFO 07-09 13:50:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x +(VllmWorker rank=3 pid=757644) INFO 07-09 13:51:32 [gpu_model_runner.py:1686] Graph capturing finished in 49 secs, took 3.55 GiB +(VllmWorker rank=1 pid=757642) INFO 07-09 13:51:32 [gpu_model_runner.py:1686] Graph capturing finished in 49 secs, took 3.55 GiB +(VllmWorker rank=2 pid=757643) INFO 07-09 13:51:32 [gpu_model_runner.py:1686] Graph capturing finished in 49 secs, took 3.55 GiB +(VllmWorker rank=0 pid=757641) INFO 07-09 13:51:32 [gpu_model_runner.py:1686] Graph capturing finished in 49 secs, took 3.55 GiB +INFO 07-09 13:51:32 [core.py:159] init engine (profile, create kv cache, warmup model) took 125.10 seconds +INFO 07-09 13:51:32 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 14:12:27 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 14:12:27 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value| |Stderr| +|------------------|------:|---------------------|----:|---|-----:| +|all | |sem | 0|± | 0| +| | |math_pass@1:1_samples| 0|± | 0| +|mm\|arc_challenge\|0| 0|sem | 0|± | 0| +|mm\|arc_easy\|0 | 0|sem | 0|± | 0| +|mm\|commonsenseqa\|0| 0|sem | 0|± | 0| +|mm\|gpqa_diamond\|0 | 2|sem | 0| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples| 0|± | 0| +|mm\|math_500\|0 | 3|math_pass@1:1_samples| 0|± | 0| +|mm\|truthfulqa\|0 | 0|sem | 0|± | 0| + diff --git a/merge_qwen/logs/qwen_dare_linear_9.log b/merge_qwen/logs/qwen_dare_linear_9.log new file mode 100644 index 0000000000000000000000000000000000000000..497e6bce8ac1b6f7766318312ec48a1ae17aa97f --- /dev/null +++ b/merge_qwen/logs/qwen_dare_linear_9.log @@ -0,0 +1,100 @@ +INFO 07-09 14:12:26 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 14:12:35 [config.py:717] This model supports multiple tasks: {'generate', 'reward', 'embed', 'score', 'classify'}. Defaulting to 'generate'. +INFO 07-09 14:12:35 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 14:12:35 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 14:12:37 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_dare_linear_9', speculative_config=None, tokenizer='./merged_qwen/qwen_dare_linear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_dare_linear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 14:12:37 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 14:12:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_4633c140'), local_subscribe_addr='ipc:///tmp/c53a6a04-c288-4c8b-b1b2-c1855513ebd7', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 14:12:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 14:12:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 14:12:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 14:12:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=767049) INFO 07-09 14:12:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bace012f'), local_subscribe_addr='ipc:///tmp/b965fc5a-3b1e-4f92-99df-3e3ba040a17c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=767050) INFO 07-09 14:12:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5cb29da9'), local_subscribe_addr='ipc:///tmp/bb1a7e11-4576-4c18-bbf1-6efa3c58ea7d', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=767051) INFO 07-09 14:12:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bb9c0cea'), local_subscribe_addr='ipc:///tmp/37f32a9a-b046-40eb-8af2-b842fc6080a6', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=767052) INFO 07-09 14:12:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0dd1c516'), local_subscribe_addr='ipc:///tmp/d019599d-da09-466d-aad5-47d284d19976', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=767050) INFO 07-09 14:12:38 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=767049) INFO 07-09 14:12:38 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=767050) INFO 07-09 14:12:38 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=767049) INFO 07-09 14:12:38 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=767052) INFO 07-09 14:12:38 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=767051) INFO 07-09 14:12:38 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=767052) INFO 07-09 14:12:38 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=767051) INFO 07-09 14:12:38 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=767052) WARNING 07-09 14:12:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=767051) WARNING 07-09 14:12:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=767050) WARNING 07-09 14:12:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=767049) WARNING 07-09 14:12:39 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=767049) INFO 07-09 14:12:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_6f6727a6'), local_subscribe_addr='ipc:///tmp/33a6347c-5ef5-4f6a-801d-48bdc0775b19', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=767050) INFO 07-09 14:12:39 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=767052) INFO 07-09 14:12:39 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=0 pid=767049) INFO 07-09 14:12:39 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=767051) INFO 07-09 14:12:39 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=767050) INFO 07-09 14:12:39 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=767052) INFO 07-09 14:12:39 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=767051) INFO 07-09 14:12:39 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=767052) WARNING 07-09 14:12:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=767049) INFO 07-09 14:12:39 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=767050) WARNING 07-09 14:12:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=767051) WARNING 07-09 14:12:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=767049) WARNING 07-09 14:12:39 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=767052) INFO 07-09 14:12:39 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_9... +(VllmWorker rank=2 pid=767051) INFO 07-09 14:12:39 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_9... +(VllmWorker rank=1 pid=767050) INFO 07-09 14:12:39 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_9... +(VllmWorker rank=0 pid=767049) INFO 07-09 14:12:39 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_dare_linear_9... 
+(VllmWorker rank=2 pid=767051) INFO 07-09 14:13:06 [loader.py:458] Loading weights took 26.41 seconds +(VllmWorker rank=3 pid=767052) INFO 07-09 14:13:06 [loader.py:458] Loading weights took 26.54 seconds +(VllmWorker rank=1 pid=767050) INFO 07-09 14:13:06 [loader.py:458] Loading weights took 26.54 seconds +(VllmWorker rank=0 pid=767049) INFO 07-09 14:13:06 [loader.py:458] Loading weights took 26.54 seconds +(VllmWorker rank=2 pid=767051) INFO 07-09 14:13:06 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.698174 seconds +(VllmWorker rank=3 pid=767052) INFO 07-09 14:13:06 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.820126 seconds +(VllmWorker rank=1 pid=767050) INFO 07-09 14:13:06 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.821565 seconds +(VllmWorker rank=0 pid=767049) INFO 07-09 14:13:06 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.814644 seconds +(VllmWorker rank=3 pid=767052) INFO 07-09 14:13:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/67d23c94bd/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=767052) INFO 07-09 14:13:17 [backends.py:430] Dynamo bytecode transform time: 10.55 s +(VllmWorker rank=1 pid=767050) INFO 07-09 14:13:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/67d23c94bd/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=767050) INFO 07-09 14:13:17 [backends.py:430] Dynamo bytecode transform time: 10.61 s +(VllmWorker rank=0 pid=767049) INFO 07-09 14:13:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/67d23c94bd/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=767049) INFO 07-09 14:13:17 [backends.py:430] Dynamo bytecode transform time: 10.69 s +(VllmWorker rank=2 pid=767051) INFO 07-09 14:13:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/67d23c94bd/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=767051) INFO 07-09 14:13:17 [backends.py:430] Dynamo bytecode transform time: 10.75 s +(VllmWorker rank=3 pid=767052) INFO 07-09 14:13:20 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=767050) INFO 07-09 14:13:20 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=767051) INFO 07-09 14:13:20 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=767049) INFO 07-09 14:13:20 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=767050) INFO 07-09 14:13:54 [backends.py:148] Compiling a graph for general shape takes 36.56 s +(VllmWorker rank=3 pid=767052) INFO 07-09 14:13:54 [backends.py:148] Compiling a graph for general shape takes 37.05 s +(VllmWorker rank=2 pid=767051) INFO 07-09 14:13:55 [backends.py:148] Compiling a graph for general shape takes 37.42 s +(VllmWorker rank=0 pid=767049) INFO 07-09 14:13:55 [backends.py:148] Compiling a graph for general shape takes 37.58 s +(VllmWorker rank=3 pid=767052) INFO 07-09 14:14:20 [monitor.py:33] torch.compile takes 47.60 s in total +(VllmWorker rank=1 pid=767050) INFO 07-09 14:14:20 [monitor.py:33] torch.compile takes 47.17 s in total +(VllmWorker rank=2 pid=767051) INFO 07-09 14:14:20 [monitor.py:33] torch.compile takes 48.17 s in total +(VllmWorker rank=0 pid=767049) INFO 07-09 14:14:20 [monitor.py:33] torch.compile takes 48.27 s in total +INFO 07-09 14:14:22 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 14:14:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x +INFO 07-09 14:14:22 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 14:14:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 14:14:22 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 14:14:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 14:14:22 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens +INFO 07-09 14:14:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x +(VllmWorker rank=1 pid=767050) INFO 07-09 14:15:13 [gpu_model_runner.py:1686] Graph capturing finished in 51 secs, took 3.55 GiB +(VllmWorker rank=2 pid=767051) INFO 07-09 14:15:13 [gpu_model_runner.py:1686] Graph capturing finished in 51 secs, took 3.55 GiB +(VllmWorker rank=0 pid=767049) INFO 07-09 14:15:13 [gpu_model_runner.py:1686] Graph capturing finished in 51 secs, took 3.55 GiB +(VllmWorker rank=3 pid=767052) INFO 07-09 14:15:13 [gpu_model_runner.py:1686] Graph capturing finished in 51 secs, took 3.55 GiB +INFO 07-09 14:15:13 [core.py:159] init engine (profile, create kv cache, warmup model) took 127.34 seconds +INFO 07-09 14:15:14 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 14:36:00 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 14:36:00 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value| |Stderr| +|------------------|------:|---------------------|----:|---|-----:| +|all | |sem | 0|± | 0| +| | |math_pass@1:1_samples| 0|± | 0| +|mm\|arc_challenge\|0| 0|sem | 0|± | 0| +|mm\|arc_easy\|0 | 0|sem | 0|± | 0| +|mm\|commonsenseqa\|0| 0|sem | 0|± | 0| +|mm\|gpqa_diamond\|0 | 2|sem | 0| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples| 0|± | 0| +|mm\|math_500\|0 | 3|math_pass@1:1_samples| 0|± | 0| +|mm\|truthfulqa\|0 | 0|sem | 0|± | 0| + diff --git a/merge_qwen/logs/qwen_linear_1.log b/merge_qwen/logs/qwen_linear_1.log new file mode 100644 index 0000000000000000000000000000000000000000..bb19d0efc4776269ea2bcc438f6fa25016eb992b --- /dev/null +++ b/merge_qwen/logs/qwen_linear_1.log @@ -0,0 +1,96 @@ +INFO 07-09 20:08:47 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:08:56 [config.py:717] This model supports multiple tasks: {'reward', 'classify', 'generate', 'score', 'embed'}. Defaulting to 'generate'. +INFO 07-09 20:08:56 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:08:56 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 20:08:57 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_linear_1', speculative_config=None, tokenizer='./merged_qwen/qwen_linear_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_linear_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:08:57 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 20:08:57 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_08e32528'), local_subscribe_addr='ipc:///tmp/8f067851-9bce-4507-90a2-b49a1a6d21e6', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:08:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:08:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:08:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:08:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=972899) INFO 07-09 20:08:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_60203d48'), local_subscribe_addr='ipc:///tmp/8a1578e8-0ea0-4a94-95f6-b35a63557cf7', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=972900) INFO 07-09 20:08:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_17f55208'), local_subscribe_addr='ipc:///tmp/441c952f-5362-40d1-84e3-53878f04c284', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=972901) INFO 07-09 20:08:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_79a16e35'), local_subscribe_addr='ipc:///tmp/7a863236-d6f9-4d1b-9b48-cd8f9944b893', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=972902) INFO 07-09 20:08:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a3e9e2e1'), local_subscribe_addr='ipc:///tmp/e5a04665-3cc6-44e4-8c30-fd4a3d535573', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=972899) INFO 07-09 20:08:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=972900) INFO 07-09 20:08:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=972901) INFO 07-09 20:08:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=972899) INFO 07-09 20:08:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=972900) INFO 07-09 20:08:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=972901) INFO 07-09 20:08:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=972902) INFO 07-09 20:08:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=972902) INFO 07-09 20:08:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=972901) WARNING 07-09 20:09:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=972902) WARNING 07-09 20:09:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=972900) WARNING 07-09 20:09:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=972899) WARNING 07-09 20:09:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:00 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_1fc21f7b'), local_subscribe_addr='ipc:///tmp/4aca0aab-b071-4a00-b8db-0c103875d4ea', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:00 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:00 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:00 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:00 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=972901) WARNING 07-09 20:09:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=972902) WARNING 07-09 20:09:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=972900) WARNING 07-09 20:09:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=972899) WARNING 07-09 20:09:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_1... +(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_1... +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_1... +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_1... 
+(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:31 [loader.py:458] Loading weights took 30.42 seconds +(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:31 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 30.850261 seconds +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:31 [loader.py:458] Loading weights took 30.64 seconds +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:31 [loader.py:458] Loading weights took 30.66 seconds +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:31 [loader.py:458] Loading weights took 30.64 seconds +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:31 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 31.099034 seconds +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:31 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 31.099510 seconds +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:31 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 31.087886 seconds +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/58c5a6d797/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/58c5a6d797/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/58c5a6d797/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/58c5a6d797/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:43 [backends.py:430] Dynamo bytecode transform time: 11.51 s +(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:43 [backends.py:430] Dynamo bytecode transform time: 11.52 s +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:43 [backends.py:430] Dynamo bytecode transform time: 11.52 s +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:43 [backends.py:430] Dynamo bytecode transform time: 11.52 s +(VllmWorker rank=1 pid=972900) INFO 07-09 20:09:50 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.028 s +(VllmWorker rank=0 pid=972899) INFO 07-09 20:09:51 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.140 s +(VllmWorker rank=2 pid=972901) INFO 07-09 20:09:51 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.170 s +(VllmWorker rank=3 pid=972902) INFO 07-09 20:09:51 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.213 s +(VllmWorker rank=2 pid=972901) INFO 07-09 20:10:00 [monitor.py:33] torch.compile takes 11.52 s in total +(VllmWorker rank=3 pid=972902) INFO 07-09 20:10:00 [monitor.py:33] torch.compile takes 11.52 s in total +(VllmWorker rank=0 pid=972899) INFO 07-09 20:10:00 [monitor.py:33] torch.compile takes 11.51 s in total +(VllmWorker rank=1 pid=972900) INFO 07-09 20:10:00 [monitor.py:33] torch.compile takes 11.52 s in total +INFO 07-09 20:10:02 [kv_cache_utils.py:634] GPU KV cache size: 1,262,080 tokens +INFO 07-09 20:10:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.25x +INFO 07-09 20:10:02 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens +INFO 07-09 20:10:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x
+INFO 07-09 20:10:02 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens +INFO 07-09 20:10:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x +INFO 07-09 20:10:02 [kv_cache_utils.py:634] GPU KV cache size: 1,262,752 tokens +INFO 07-09 20:10:02 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.58x +(VllmWorker rank=3 pid=972902) INFO 07-09 20:10:35 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB +(VllmWorker rank=2 pid=972901) INFO 07-09 20:10:35 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB +(VllmWorker rank=1 pid=972900) INFO 07-09 20:10:35 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB +(VllmWorker rank=0 pid=972899) INFO 07-09 20:10:35 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB +INFO 07-09 20:10:35 [core.py:159] init engine (profile, create kv cache, warmup model) took 64.14 seconds +INFO 07-09 20:10:36 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 20:19:56 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 20:19:56 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.7396|± |0.0160| +| | |math_pass@1:1_samples|0.9719|± |0.0053| +|mm\|arc_challenge\|0| 0|sem |0.9524|± |0.0107| +|mm\|arc_easy\|0 | 0|sem |0.9855|± |0.0038| +|mm\|commonsenseqa\|0| 0|sem |0.9284|± |0.0141| +|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9439|± |0.0105| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000| +|mm\|truthfulqa\|0 | 0|sem |0.8319|± |0.0353| + diff --git a/merge_qwen/logs/qwen_linear_3.log b/merge_qwen/logs/qwen_linear_3.log new file mode 100644 index 0000000000000000000000000000000000000000..a91e18d3337c357a368960d2247ea070a98ffc69 --- /dev/null +++ b/merge_qwen/logs/qwen_linear_3.log @@ -0,0 +1,96 @@ +INFO 07-09 20:19:55 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:20:03 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'embed', 'generate', 'classify'}. Defaulting to 'generate'. +INFO 07-09 20:20:03 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:20:03 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 20:20:05 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_linear_3', speculative_config=None, tokenizer='./merged_qwen/qwen_linear_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_linear_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:20:05 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 20:20:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_865e48dc'), local_subscribe_addr='ipc:///tmp/8a5fde29-30f0-45b1-8eb0-3275fc08a580', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:20:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:20:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5a589353'), local_subscribe_addr='ipc:///tmp/90dbe245-a1e7-4a97-9a4a-49a50090dd00', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:20:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8602704b'), local_subscribe_addr='ipc:///tmp/ffa2f8a4-65bd-4f12-bbfc-121887774055', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:20:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_95edc88c'), local_subscribe_addr='ipc:///tmp/21ab5f54-94c4-4d12-b53c-4f5ca402cf49', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5c0676af'), local_subscribe_addr='ipc:///tmp/60ac64b1-e77a-45b6-ae00-60fc9fd39913', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:06 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:06 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:06 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:06 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:06 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:06 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:06 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:06 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=977709) WARNING 07-09 20:20:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=977707) WARNING 07-09 20:20:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=3 pid=977710) WARNING 07-09 20:20:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=977708) WARNING 07-09 20:20:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_5754961c'), local_subscribe_addr='ipc:///tmp/6cf8d9f9-a3ec-4b3c-b82b-a44bb098518c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:07 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:07 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:07 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:07 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:07 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:07 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=977709) WARNING 07-09 20:20:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=977710) WARNING 07-09 20:20:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:07 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:07 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=977707) WARNING 07-09 20:20:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=977708) WARNING 07-09 20:20:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:07 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_3... +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:07 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_3... +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:07 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_3... +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:07 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_3... 
+(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:37 [loader.py:458] Loading weights took 29.80 seconds +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:37 [loader.py:458] Loading weights took 29.82 seconds +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:37 [loader.py:458] Loading weights took 29.79 seconds +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:37 [loader.py:458] Loading weights took 29.80 seconds +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:37 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 30.037090 seconds +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:37 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 30.060678 seconds +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:37 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 30.063505 seconds +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:37 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 30.045281 seconds +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8953306b73/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8953306b73/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8953306b73/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/8953306b73/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:49 [backends.py:430] Dynamo bytecode transform time: 11.18 s +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:49 [backends.py:430] Dynamo bytecode transform time: 11.18 s +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:49 [backends.py:430] Dynamo bytecode transform time: 11.18 s +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:49 [backends.py:430] Dynamo bytecode transform time: 11.18 s +(VllmWorker rank=2 pid=977709) INFO 07-09 20:20:56 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.999 s +(VllmWorker rank=1 pid=977708) INFO 07-09 20:20:56 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.061 s +(VllmWorker rank=3 pid=977710) INFO 07-09 20:20:56 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.085 s +(VllmWorker rank=0 pid=977707) INFO 07-09 20:20:56 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.238 s +(VllmWorker rank=0 pid=977707) INFO 07-09 20:21:06 [monitor.py:33] torch.compile takes 11.18 s in total +(VllmWorker rank=1 pid=977708) INFO 07-09 20:21:06 [monitor.py:33] torch.compile takes 11.18 s in total +(VllmWorker rank=2 pid=977709) INFO 07-09 20:21:06 [monitor.py:33] torch.compile takes 11.18 s in total +(VllmWorker rank=3 pid=977710) INFO 07-09 20:21:06 [monitor.py:33] torch.compile takes 11.18 s in total +INFO 07-09 20:21:07 [kv_cache_utils.py:634] GPU KV cache size: 1,262,080 tokens +INFO 07-09 20:21:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.25x +INFO 07-09 20:21:07 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens +INFO 07-09 20:21:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x
+INFO 07-09 20:21:07 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens +INFO 07-09 20:21:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x +INFO 07-09 20:21:07 [kv_cache_utils.py:634] GPU KV cache size: 1,262,752 tokens +INFO 07-09 20:21:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.58x +(VllmWorker rank=0 pid=977707) INFO 07-09 20:21:36 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 3.55 GiB +(VllmWorker rank=1 pid=977708) INFO 07-09 20:21:36 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 3.55 GiB +(VllmWorker rank=3 pid=977710) INFO 07-09 20:21:36 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 3.55 GiB +(VllmWorker rank=2 pid=977709) INFO 07-09 20:21:36 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 3.55 GiB +INFO 07-09 20:21:36 [core.py:159] init engine (profile, create kv cache, warmup model) took 58.54 seconds +INFO 07-09 20:21:36 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 20:27:20 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 20:27:21 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.7551|± |0.0138| +| | |math_pass@1:1_samples|0.9792|± |0.0046| +|mm\|arc_challenge\|0| 0|sem |0.9749|± |0.0078| +|mm\|arc_easy\|0 | 0|sem |0.9959|± |0.0021| +|mm\|commonsenseqa\|0| 0|sem |0.9284|± |0.0141| +|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9584|± |0.0091| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000| +|mm\|truthfulqa\|0 | 0|sem |0.8761|± |0.0311| + diff --git a/merge_qwen/logs/qwen_linear_5.log b/merge_qwen/logs/qwen_linear_5.log new file mode 100644 index 0000000000000000000000000000000000000000..7b66829b7283f730cc07d9ab5ec612a2cdcc1210 --- /dev/null +++ b/merge_qwen/logs/qwen_linear_5.log @@ -0,0 +1,100 @@ +INFO 07-09 20:27:19 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:27:28 [config.py:717] This model supports multiple tasks: {'reward', 'classify', 'score', 'embed', 'generate'}. Defaulting to 'generate'. +INFO 07-09 20:27:28 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:27:28 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 20:27:30 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_linear_5', speculative_config=None, tokenizer='./merged_qwen/qwen_linear_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_linear_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:27:30 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
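Editor's note: the engine-config line above can be approximated from vLLM's offline Python API. A minimal sketch, assuming a local merged checkpoint at ./merged_qwen/qwen_linear_5 (argument names follow vLLM 0.8.x; this is not the repo's actual launch script):

```python
# Minimal sketch of an offline vLLM engine matching the logged config
# (vLLM v0.8.5.post1). Illustrative only, not the repo's launch code.
from vllm import LLM, SamplingParams

llm = LLM(
    model="./merged_qwen/qwen_linear_5",
    dtype="float16",              # dtype=torch.float16 in the log
    max_model_len=2048,           # max_seq_len=2048 in the log
    tensor_parallel_size=4,       # tensor_parallel_size=4 in the log
    enable_prefix_caching=True,   # enable_prefix_caching=True in the log
)
out = llm.generate(["Hello"], SamplingParams(max_tokens=16))
print(out[0].outputs[0].text)
```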
+INFO 07-09 20:27:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_719011d7'), local_subscribe_addr='ipc:///tmp/3ca4e221-27e3-4fa4-b577-90b7911d8315', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:27:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_108cb24a'), local_subscribe_addr='ipc:///tmp/2f55bd70-3387-4a4a-b0e4-0765fc41f262', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:27:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_39636a10'), local_subscribe_addr='ipc:///tmp/3dd89b78-c993-4ac8-83a7-53492073508d', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:27:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:27:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5d706ebb'), local_subscribe_addr='ipc:///tmp/02b92bdf-03ae-412a-91d3-ec893a1e3444', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1f784d2d'), local_subscribe_addr='ipc:///tmp/b32b47ec-da94-498d-b3ea-5753ed70aec9', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:31 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:31 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:31 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:31 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:31 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:31 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:31 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:31 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=982294) WARNING 07-09 20:27:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=982295) WARNING 07-09 20:27:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
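Editor's note: the two warnings above state their own remedies, namely pinning Torch's CPU thread count via OMP_NUM_THREADS and passing disable_custom_all_reduce explicitly on PCIe-only multi-GPU hosts. A hedged snippet showing both knobs (illustrative; the env var must be set before torch/vLLM are imported):

```python
# Quiet the two warnings logged above. Illustrative only.
import os
os.environ.setdefault("OMP_NUM_THREADS", "1")  # avoid CPU thread contention

from vllm import LLM

llm = LLM(
    model="./merged_qwen/qwen_linear_5",
    tensor_parallel_size=4,
    disable_custom_all_reduce=True,  # silences the custom_all_reduce warning
)
```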
+(VllmWorker rank=1 pid=982293) WARNING 07-09 20:27:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=982292) WARNING 07-09 20:27:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_23acfe6f'), local_subscribe_addr='ipc:///tmp/7bc35c9d-f85c-44c4-b8e2-4e62f9562546', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:32 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:32 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:32 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:32 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:32 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:32 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:32 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=982295) WARNING 07-09 20:27:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=982294) WARNING 07-09 20:27:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=982293) WARNING 07-09 20:27:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:32 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=982292) WARNING 07-09 20:27:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_5... +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_5... +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_5... +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_5... 
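Editor's note: the checkpoints evaluated here are named qwen_linear_{5,7,9}, which suggests linear (weighted-average) merging of two parent models at different coefficients; that reading is an assumption based purely on the file names. A minimal state-dict interpolation sketch, with both parent paths being hypothetical stand-ins:

```python
# Hedged sketch of linear model merging: merged = (1 - a) * base + a * other.
# Reading "qwen_linear_5" as coefficient a = 0.5 is an assumption, and both
# parent paths below are hypothetical placeholders.
import torch
from transformers import AutoModelForCausalLM

a = 0.5
base = AutoModelForCausalLM.from_pretrained("path/to/base_model", torch_dtype=torch.float16)
other = AutoModelForCausalLM.from_pretrained("path/to/other_model", torch_dtype=torch.float16)

merged = base.state_dict()
for name, w in other.state_dict().items():
    merged[name] = (1.0 - a) * merged[name] + a * w  # elementwise interpolation

base.load_state_dict(merged)
base.save_pretrained("./merged_qwen/qwen_linear_5")
```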
+(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:54 [loader.py:458] Loading weights took 21.56 seconds +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:54 [loader.py:458] Loading weights took 21.65 seconds +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:54 [loader.py:458] Loading weights took 21.59 seconds +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:54 [loader.py:458] Loading weights took 21.60 seconds +(VllmWorker rank=2 pid=982294) INFO 07-09 20:27:54 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 21.856501 seconds +(VllmWorker rank=3 pid=982295) INFO 07-09 20:27:54 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 21.882402 seconds +(VllmWorker rank=1 pid=982293) INFO 07-09 20:27:54 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 21.886088 seconds +(VllmWorker rank=0 pid=982292) INFO 07-09 20:27:54 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 21.878955 seconds +(VllmWorker rank=2 pid=982294) INFO 07-09 20:28:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/b80fed33ef/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=982294) INFO 07-09 20:28:04 [backends.py:430] Dynamo bytecode transform time: 10.40 s +(VllmWorker rank=3 pid=982295) INFO 07-09 20:28:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/b80fed33ef/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=982295) INFO 07-09 20:28:04 [backends.py:430] Dynamo bytecode transform time: 10.49 s +(VllmWorker rank=0 pid=982292) INFO 07-09 20:28:05 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/b80fed33ef/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=982292) INFO 07-09 20:28:05 [backends.py:430] Dynamo bytecode transform time: 10.54 s +(VllmWorker rank=1 pid=982293) INFO 07-09 20:28:05 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/b80fed33ef/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=982293) INFO 07-09 20:28:05 [backends.py:430] Dynamo bytecode transform time: 10.64 s +(VllmWorker rank=0 pid=982292) INFO 07-09 20:28:08 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=982294) INFO 07-09 20:28:08 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=982295) INFO 07-09 20:28:08 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=982293) INFO 07-09 20:28:08 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=982295) INFO 07-09 20:28:41 [backends.py:148] Compiling a graph for general shape takes 35.58 s +(VllmWorker rank=2 pid=982294) INFO 07-09 20:28:41 [backends.py:148] Compiling a graph for general shape takes 36.15 s +(VllmWorker rank=1 pid=982293) INFO 07-09 20:28:41 [backends.py:148] Compiling a graph for general shape takes 36.15 s +(VllmWorker rank=0 pid=982292) INFO 07-09 20:28:42 [backends.py:148] Compiling a graph for general shape takes 36.53 s +(VllmWorker rank=1 pid=982293) INFO 07-09 20:29:07 [monitor.py:33] torch.compile takes 46.80 s in total +(VllmWorker rank=3 pid=982295) INFO 07-09 20:29:07 [monitor.py:33] torch.compile takes 46.08 s in total +(VllmWorker rank=2 pid=982294) INFO 07-09 20:29:07 [monitor.py:33] torch.compile takes 46.55 s in total +(VllmWorker rank=0 pid=982292) INFO 07-09 20:29:07 [monitor.py:33] torch.compile takes 47.07 s in total +INFO 07-09 20:29:09 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens +INFO 
07-09 20:29:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x +INFO 07-09 20:29:09 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 20:29:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 20:29:09 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 20:29:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 20:29:09 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens +INFO 07-09 20:29:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x +(VllmWorker rank=1 pid=982293) INFO 07-09 20:29:45 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB +(VllmWorker rank=3 pid=982295) INFO 07-09 20:29:45 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB +(VllmWorker rank=2 pid=982294) INFO 07-09 20:29:45 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB +(VllmWorker rank=0 pid=982292) INFO 07-09 20:29:45 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB +INFO 07-09 20:29:45 [core.py:159] init engine (profile, create kv cache, warmup model) took 110.90 seconds +INFO 07-09 20:29:45 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 20:34:49 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 20:34:49 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.7582|± |0.0132| +| | |math_pass@1:1_samples|0.9744|± |0.0130| +|mm\|arc_challenge\|0| 0|sem |0.9799|± |0.0070| +|mm\|arc_easy\|0 | 0|sem |0.9948|± |0.0023| +|mm\|commonsenseqa\|0| 0|sem |0.9134|± |0.0154| +|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9667|± |0.0082| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9821|± |0.0179| +|mm\|truthfulqa\|0 | 0|sem |0.9027|± |0.0280| + diff --git a/merge_qwen/logs/qwen_linear_7.log b/merge_qwen/logs/qwen_linear_7.log new file mode 100644 index 0000000000000000000000000000000000000000..eea1ab3752855e2ef7bde1a4af9814ada84002f4 --- /dev/null +++ b/merge_qwen/logs/qwen_linear_7.log @@ -0,0 +1,96 @@ +INFO 07-09 20:34:48 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:34:56 [config.py:717] This model supports multiple tasks: {'generate', 'score', 'classify', 'reward', 'embed'}. Defaulting to 'generate'. +INFO 07-09 20:34:57 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:34:57 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
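Editor's note: each "Maximum concurrency" figure reported by kv_cache_utils above is simply the GPU KV cache capacity divided by the per-request token budget (max_seq_len = 2,048). A quick check against the values logged at the start of this chunk:

```python
# "Maximum concurrency" = KV cache capacity / per-request token budget (2048).
for cache_tokens in (1_256_992, 1_256_832, 1_257_680):
    print(f"{cache_tokens / 2_048:.2f}x")
# -> 613.77x, 613.69x, 614.10x, matching the kv_cache_utils lines above
```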
+INFO 07-09 20:34:58 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_linear_7', speculative_config=None, tokenizer='./merged_qwen/qwen_linear_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_linear_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:34:58 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 20:34:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_158f81ac'), local_subscribe_addr='ipc:///tmp/d79f69ee-7bd4-4c8a-a4d2-1f04055acd81', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:34:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=987321) INFO 07-09 20:34:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f0abf152'), local_subscribe_addr='ipc:///tmp/9e2d41b2-b5e3-424d-b04d-43eca672791f', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:34:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=987320) INFO 07-09 20:34:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f9f4b150'), local_subscribe_addr='ipc:///tmp/dae869c2-bdf6-468e-aaf8-ac1c0e8959c3', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:34:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:34:58 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=987322) INFO 07-09 20:34:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_aebd9780'), local_subscribe_addr='ipc:///tmp/a9ddec84-23bb-4215-94b4-ce037ad59180', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=987326) INFO 07-09 20:34:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4c3713d3'), local_subscribe_addr='ipc:///tmp/111009af-be7d-4ce6-ba02-ddab30fce2e4', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:00 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:00 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:00 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:00 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:00 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:00 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:00 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:00 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=987322) WARNING 07-09 20:35:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=987321) WARNING 07-09 20:35:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=3 pid=987326) WARNING 07-09 20:35:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=987320) WARNING 07-09 20:35:00 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:00 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_b47c8383'), local_subscribe_addr='ipc:///tmp/ea76b65d-53fe-42d1-b7eb-d8ec7aa4b01b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:00 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:00 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:00 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:00 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=987322) WARNING 07-09 20:35:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:00 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=987320) WARNING 07-09 20:35:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=987326) WARNING 07-09 20:35:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=987321) WARNING 07-09 20:35:00 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_7... +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_7... +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_7... +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:00 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_7... 
+(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:29 [loader.py:458] Loading weights took 28.97 seconds +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:30 [loader.py:458] Loading weights took 29.11 seconds +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:30 [loader.py:458] Loading weights took 29.10 seconds +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:30 [loader.py:458] Loading weights took 29.13 seconds +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:30 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 29.218786 seconds +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:30 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 29.396722 seconds +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:30 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 29.379123 seconds +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:30 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 29.390407 seconds +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5905b07847/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:40 [backends.py:430] Dynamo bytecode transform time: 9.73 s +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5905b07847/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:40 [backends.py:430] Dynamo bytecode transform time: 9.81 s +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5905b07847/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:40 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5905b07847/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:40 [backends.py:430] Dynamo bytecode transform time: 9.91 s +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:40 [backends.py:430] Dynamo bytecode transform time: 9.91 s +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.821 s +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.920 s +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.935 s +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 6.985 s +(VllmWorker rank=3 pid=987326) INFO 07-09 20:35:57 [monitor.py:33] torch.compile takes 9.91 s in total +(VllmWorker rank=2 pid=987322) INFO 07-09 20:35:57 [monitor.py:33] torch.compile takes 9.81 s in total +(VllmWorker rank=0 pid=987320) INFO 07-09 20:35:57 [monitor.py:33] torch.compile takes 9.91 s in total +(VllmWorker rank=1 pid=987321) INFO 07-09 20:35:57 [monitor.py:33] torch.compile takes 9.73 s in total +INFO 07-09 20:35:59 [kv_cache_utils.py:634] GPU KV cache size: 1,262,080 tokens +INFO 07-09 20:35:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.25x +INFO 07-09 20:35:59 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens +INFO 07-09 20:35:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x +INFO 07-09 20:35:59 [kv_cache_utils.py:634] GPU KV cache 
size: 1,261,904 tokens +INFO 07-09 20:35:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x +INFO 07-09 20:35:59 [kv_cache_utils.py:634] GPU KV cache size: 1,262,752 tokens +INFO 07-09 20:35:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.58x +(VllmWorker rank=2 pid=987322) INFO 07-09 20:36:29 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +(VllmWorker rank=1 pid=987321) INFO 07-09 20:36:29 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +(VllmWorker rank=0 pid=987320) INFO 07-09 20:36:29 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +(VllmWorker rank=3 pid=987326) INFO 07-09 20:36:29 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +INFO 07-09 20:36:29 [core.py:159] init engine (profile, create kv cache, warmup model) took 59.40 seconds +INFO 07-09 20:36:30 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 20:42:15 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 20:42:15 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.7627|± |0.0125| +| | |math_pass@1:1_samples|0.9823|± |0.0042| +|mm\|arc_challenge\|0| 0|sem |0.9799|± |0.0070| +|mm\|arc_easy\|0 | 0|sem |0.9969|± |0.0018| +|mm\|commonsenseqa\|0| 0|sem |0.9254|± |0.0144| +|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9647|± |0.0084| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000| +|mm\|truthfulqa\|0 | 0|sem |0.9115|± |0.0268| + diff --git a/merge_qwen/logs/qwen_linear_9.log b/merge_qwen/logs/qwen_linear_9.log new file mode 100644 index 0000000000000000000000000000000000000000..3da2f283e6445c782bcd10ff4d72abfc6500222c --- /dev/null +++ b/merge_qwen/logs/qwen_linear_9.log @@ -0,0 +1,96 @@ +INFO 07-09 20:42:14 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 20:42:23 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'reward', 'classify', 'embed'}. Defaulting to 'generate'. +INFO 07-09 20:42:23 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 20:42:23 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 20:42:24 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_linear_9', speculative_config=None, tokenizer='./merged_qwen/qwen_linear_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_linear_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 20:42:24 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
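Editor's note: the cudagraph_capture_sizes list in the compilation_config above is not arbitrary; it runs from 512 down to 8 in steps of 8, followed by 4, 2, 1, with max_capture_size 512. Generating it beats spelling it out (illustrative):

```python
# Reconstruct cudagraph_capture_sizes from the engine config above:
# 512 down to 8 in steps of 8, then 4, 2, 1.
capture_sizes = list(range(512, 7, -8)) + [4, 2, 1]
assert capture_sizes[0] == 512 and capture_sizes[-4:] == [8, 4, 2, 1]
assert max(capture_sizes) == 512      # matches "max_capture_size":512
print(len(capture_sizes), "sizes")    # 67 sizes
```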
+INFO 07-09 20:42:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_3c93b70b'), local_subscribe_addr='ipc:///tmp/ae45066e-6d5a-45fa-8ef7-6c3d8803de02', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:42:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:42:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a52a6170'), local_subscribe_addr='ipc:///tmp/2de37d34-d3f8-40f3-ad76-c948827c8576', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 20:42:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 20:42:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a1658ad1'), local_subscribe_addr='ipc:///tmp/69efb66f-c11e-4b2a-8113-955dbee38e16', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_17182c4e'), local_subscribe_addr='ipc:///tmp/503de788-0a24-4c06-9304-9856300f6791', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1139bc52'), local_subscribe_addr='ipc:///tmp/a674435d-70c9-4b3a-a50c-5f3d5f46358b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=991792) WARNING 07-09 20:42:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=991791) WARNING 07-09 20:42:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=991789) WARNING 07-09 20:42:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=991790) WARNING 07-09 20:42:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_3b2adb71'), local_subscribe_addr='ipc:///tmp/69071314-72ca-418b-836c-5f525b14450e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:26 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:26 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:26 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:26 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=991792) WARNING 07-09 20:42:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=991791) WARNING 07-09 20:42:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=991789) WARNING 07-09 20:42:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=991790) WARNING 07-09 20:42:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:26 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_9... +(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:26 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_9... +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:26 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_9... +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:26 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_linear_9... 
+(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:53 [loader.py:458] Loading weights took 26.23 seconds +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:53 [loader.py:458] Loading weights took 26.38 seconds +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:53 [loader.py:458] Loading weights took 26.36 seconds +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:53 [loader.py:458] Loading weights took 26.35 seconds +(VllmWorker rank=3 pid=991792) INFO 07-09 20:42:53 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.469226 seconds +(VllmWorker rank=2 pid=991791) INFO 07-09 20:42:53 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.627609 seconds +(VllmWorker rank=1 pid=991790) INFO 07-09 20:42:53 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.630266 seconds +(VllmWorker rank=0 pid=991789) INFO 07-09 20:42:53 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.619580 seconds +(VllmWorker rank=3 pid=991792) INFO 07-09 20:43:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e49da2e1f3/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=991790) INFO 07-09 20:43:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e49da2e1f3/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=991792) INFO 07-09 20:43:03 [backends.py:430] Dynamo bytecode transform time: 9.61 s +(VllmWorker rank=1 pid=991790) INFO 07-09 20:43:03 [backends.py:430] Dynamo bytecode transform time: 9.61 s +(VllmWorker rank=2 pid=991791) INFO 07-09 20:43:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e49da2e1f3/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=991791) INFO 07-09 20:43:03 [backends.py:430] Dynamo bytecode transform time: 9.67 s +(VllmWorker rank=0 pid=991789) INFO 07-09 20:43:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/e49da2e1f3/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=991789) INFO 07-09 20:43:03 [backends.py:430] Dynamo bytecode transform time: 9.79 s +(VllmWorker rank=3 pid=991792) INFO 07-09 20:43:10 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.060 s +(VllmWorker rank=2 pid=991791) INFO 07-09 20:43:10 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.005 s +(VllmWorker rank=1 pid=991790) INFO 07-09 20:43:10 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.066 s +(VllmWorker rank=0 pid=991789) INFO 07-09 20:43:11 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 7.124 s +(VllmWorker rank=3 pid=991792) INFO 07-09 20:43:20 [monitor.py:33] torch.compile takes 9.61 s in total +(VllmWorker rank=0 pid=991789) INFO 07-09 20:43:20 [monitor.py:33] torch.compile takes 9.79 s in total +(VllmWorker rank=1 pid=991790) INFO 07-09 20:43:20 [monitor.py:33] torch.compile takes 9.61 s in total +(VllmWorker rank=2 pid=991791) INFO 07-09 20:43:20 [monitor.py:33] torch.compile takes 9.67 s in total +INFO 07-09 20:43:22 [kv_cache_utils.py:634] GPU KV cache size: 1,262,080 tokens +INFO 07-09 20:43:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.25x +INFO 07-09 20:43:22 [kv_cache_utils.py:634] GPU KV cache size: 1,261,904 tokens +INFO 07-09 20:43:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x +INFO 07-09 20:43:22 [kv_cache_utils.py:634] GPU KV cache 
size: 1,261,904 tokens +INFO 07-09 20:43:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.16x +INFO 07-09 20:43:22 [kv_cache_utils.py:634] GPU KV cache size: 1,262,752 tokens +INFO 07-09 20:43:22 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 616.58x +(VllmWorker rank=3 pid=991792) INFO 07-09 20:43:53 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +(VllmWorker rank=2 pid=991791) INFO 07-09 20:43:53 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +(VllmWorker rank=1 pid=991790) INFO 07-09 20:43:53 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +(VllmWorker rank=0 pid=991789) INFO 07-09 20:43:53 [gpu_model_runner.py:1686] Graph capturing finished in 31 secs, took 3.55 GiB +INFO 07-09 20:43:53 [core.py:159] init engine (profile, create kv cache, warmup model) took 60.29 seconds +INFO 07-09 20:43:54 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 20:50:33 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 20:50:33 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.7633|± |0.0125| +| | |math_pass@1:1_samples|0.9751|± |0.0050| +|mm\|arc_challenge\|0| 0|sem |0.9749|± |0.0078| +|mm\|arc_easy\|0 | 0|sem |0.9959|± |0.0021| +|mm\|commonsenseqa\|0| 0|sem |0.9254|± |0.0144| +|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9501|± |0.0099| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000| +|mm\|truthfulqa\|0 | 0|sem |0.9204|± |0.0256| + diff --git a/merge_qwen/logs/qwen_ties_1.log b/merge_qwen/logs/qwen_ties_1.log new file mode 100644 index 0000000000000000000000000000000000000000..280fb7e30b8d421a7732b6e87b824df0ea2221d4 --- /dev/null +++ b/merge_qwen/logs/qwen_ties_1.log @@ -0,0 +1,100 @@ +INFO 07-09 15:21:17 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 15:21:27 [config.py:717] This model supports multiple tasks: {'score', 'embed', 'generate', 'reward', 'classify'}. Defaulting to 'generate'. +INFO 07-09 15:21:27 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 15:21:27 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 07-09 15:21:28 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_ties_1', speculative_config=None, tokenizer='./merged_qwen/qwen_ties_1', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_ties_1, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 07-09 15:21:28 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 07-09 15:21:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e2bd8796'), local_subscribe_addr='ipc:///tmp/84a54c85-eab1-4264-9491-4c62f469ccd3', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 15:21:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 15:21:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_12042b39'), local_subscribe_addr='ipc:///tmp/9a70ea15-fe71-4a14-bb08-b10bb04d0221', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c6116366'), local_subscribe_addr='ipc:///tmp/170175dd-f2cc-4b6d-b0e7-5ea1d3f3d26e', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 07-09 15:21:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 07-09 15:21:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c37d61df'), local_subscribe_addr='ipc:///tmp/94d3429d-f63d-40dd-9579-162a7d0f098d', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e7dfd672'), local_subscribe_addr='ipc:///tmp/5d443404-df25-47c1-84ba-5f7b7f8a50a3', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=804009) WARNING 07-09 15:21:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=804010) WARNING 07-09 15:21:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=804008) WARNING 07-09 15:21:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=804007) WARNING 07-09 15:21:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_cb78ecf8'), local_subscribe_addr='ipc:///tmp/b1de7249-f8a2-46be-9d62-a87aa6b3786b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:30 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:30 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:30 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:30 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=804009) WARNING 07-09 15:21:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=804010) WARNING 07-09 15:21:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=804008) WARNING 07-09 15:21:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=804007) WARNING 07-09 15:21:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:30 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_1... +(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:30 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_1... +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:30 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_1... +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:30 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_1... 
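Editor's note: the qwen_ties_* names suggest TIES-style merging (trim each task vector to its largest-magnitude entries, elect a per-parameter sign, then average only the entries agreeing with that sign); this is an assumption based on naming, as the merge script itself is not shown in this diff. A hedged sketch for a single parameter tensor, with stand-in inputs:

```python
# Hedged sketch of TIES-style merging for one parameter tensor: trim, elect
# sign by summed mass, average agreeing entries. Inputs below are stand-ins.
import torch

def ties_merge(task_vectors: list[torch.Tensor], density: float = 0.2) -> torch.Tensor:
    trimmed = []
    for tv in task_vectors:
        k = max(1, int(density * tv.numel()))
        thresh = tv.abs().flatten().kthvalue(tv.numel() - k + 1).values
        trimmed.append(torch.where(tv.abs() >= thresh, tv, torch.zeros_like(tv)))
    stacked = torch.stack(trimmed)
    elected_sign = torch.sign(stacked.sum(dim=0))   # sign with greater total mass
    agrees = torch.sign(stacked) == elected_sign    # keep entries matching that sign
    kept = stacked * agrees
    counts = agrees.sum(dim=0).clamp(min=1)         # avoid division by zero
    return kept.sum(dim=0) / counts                 # disjoint mean

delta = ties_merge([torch.randn(4, 4), torch.randn(4, 4)])
print(delta.shape)
```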
+(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:56 [loader.py:458] Loading weights took 25.21 seconds +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:56 [loader.py:458] Loading weights took 25.47 seconds +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:56 [loader.py:458] Loading weights took 25.45 seconds +(VllmWorker rank=3 pid=804010) INFO 07-09 15:21:56 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.468885 seconds +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:56 [loader.py:458] Loading weights took 25.46 seconds +(VllmWorker rank=2 pid=804009) INFO 07-09 15:21:56 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.733082 seconds +(VllmWorker rank=1 pid=804008) INFO 07-09 15:21:56 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.730584 seconds +(VllmWorker rank=0 pid=804007) INFO 07-09 15:21:56 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.717595 seconds +(VllmWorker rank=2 pid=804009) INFO 07-09 15:22:07 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5b96571173/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=804008) INFO 07-09 15:22:07 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5b96571173/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=804010) INFO 07-09 15:22:07 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5b96571173/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=804009) INFO 07-09 15:22:07 [backends.py:430] Dynamo bytecode transform time: 10.43 s +(VllmWorker rank=1 pid=804008) INFO 07-09 15:22:07 [backends.py:430] Dynamo bytecode transform time: 10.43 s +(VllmWorker rank=3 pid=804010) INFO 07-09 15:22:07 [backends.py:430] Dynamo bytecode transform time: 10.43 s +(VllmWorker rank=0 pid=804007) INFO 07-09 15:22:07 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/5b96571173/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=804007) INFO 07-09 15:22:07 [backends.py:430] Dynamo bytecode transform time: 10.43 s +(VllmWorker rank=0 pid=804007) INFO 07-09 15:22:10 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=804008) INFO 07-09 15:22:10 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=804010) INFO 07-09 15:22:10 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=2 pid=804009) INFO 07-09 15:22:10 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=3 pid=804010) INFO 07-09 15:22:43 [backends.py:148] Compiling a graph for general shape takes 35.94 s +(VllmWorker rank=2 pid=804009) INFO 07-09 15:22:43 [backends.py:148] Compiling a graph for general shape takes 36.03 s +(VllmWorker rank=1 pid=804008) INFO 07-09 15:22:44 [backends.py:148] Compiling a graph for general shape takes 36.42 s +(VllmWorker rank=0 pid=804007) INFO 07-09 15:22:44 [backends.py:148] Compiling a graph for general shape takes 36.48 s +(VllmWorker rank=2 pid=804009) INFO 07-09 15:23:09 [monitor.py:33] torch.compile takes 46.46 s in total +(VllmWorker rank=3 pid=804010) INFO 07-09 15:23:09 [monitor.py:33] torch.compile takes 46.36 s in total +(VllmWorker rank=0 pid=804007) INFO 07-09 15:23:09 [monitor.py:33] torch.compile takes 46.91 s in total +(VllmWorker rank=1 pid=804008) INFO 07-09 15:23:09 [monitor.py:33] torch.compile takes 46.84 s in total +INFO 07-09 15:23:11 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens +INFO 
07-09 15:23:11 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x +INFO 07-09 15:23:11 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 15:23:11 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 15:23:11 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens +INFO 07-09 15:23:11 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x +INFO 07-09 15:23:11 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens +INFO 07-09 15:23:11 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x +(VllmWorker rank=2 pid=804009) INFO 07-09 15:23:48 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.55 GiB +(VllmWorker rank=3 pid=804010) INFO 07-09 15:23:48 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.55 GiB +(VllmWorker rank=1 pid=804008) INFO 07-09 15:23:48 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.55 GiB +(VllmWorker rank=0 pid=804007) INFO 07-09 15:23:48 [gpu_model_runner.py:1686] Graph capturing finished in 37 secs, took 3.55 GiB +INFO 07-09 15:23:48 [core.py:159] init engine (profile, create kv cache, warmup model) took 111.86 seconds +INFO 07-09 15:23:49 [core_client.py:439] Core engine process 0 ready. +INFO 07-09 15:28:03 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 15:28:03 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.7470|± |0.0148| +| | |math_pass@1:1_samples|0.9740|± |0.0051| +|mm\|arc_challenge\|0| 0|sem |0.9724|± |0.0082| +|mm\|arc_easy\|0 | 0|sem |0.9938|± |0.0025| +|mm\|commonsenseqa\|0| 0|sem |0.9015|± |0.0163| +|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | | +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9480|± |0.0101| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000| +|mm\|truthfulqa\|0 | 0|sem |0.8673|± |0.0321| + diff --git a/merge_qwen/logs/qwen_ties_3.log b/merge_qwen/logs/qwen_ties_3.log new file mode 100644 index 0000000000000000000000000000000000000000..4468ff7c5ca844ed603b96818b366e2467c45cf3 --- /dev/null +++ b/merge_qwen/logs/qwen_ties_3.log @@ -0,0 +1,100 @@ +INFO 07-09 15:28:02 [__init__.py:239] Automatically detected platform cuda. +INFO 07-09 15:28:10 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'classify', 'embed', 'reward'}. Defaulting to 'generate'. +INFO 07-09 15:28:10 [config.py:1770] Defaulting to use mp for distributed inference +INFO 07-09 15:28:10 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
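Editor's note: with the qwen_ties_1 table above, four merged checkpoints have result tables in this diff. A side-by-side of their `all` rows (values copied from the logs; all four report 0.0000 on gpqa_diamond, so the macro sem score is driven by the remaining tasks):

```python
# Macro "all" rows copied from the result tables in this diff.
results = {
    "qwen_linear_5": {"sem": 0.7582, "math_pass@1": 0.9744},
    "qwen_linear_7": {"sem": 0.7627, "math_pass@1": 0.9823},
    "qwen_linear_9": {"sem": 0.7633, "math_pass@1": 0.9751},
    "qwen_ties_1":   {"sem": 0.7470, "math_pass@1": 0.9740},
}
best = max(results, key=lambda m: results[m]["sem"])
print(best, results[best])  # qwen_linear_9 has the highest macro sem here
```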
+INFO 07-09 15:28:12 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_ties_3', speculative_config=None, tokenizer='./merged_qwen/qwen_ties_3', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_ties_3, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 15:28:12 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
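For reference, the engine config logged above corresponds to roughly the following offline vLLM launch. This is a sketch inferred from the logged parameters; the actual evaluation command is not recorded in these logs:

```python
from vllm import LLM

# Plausible launch matching the logged v0.8.5.post1 engine config;
# inferred from the log line above, not taken from the eval harness.
llm = LLM(
    model="./merged_qwen/qwen_ties_3",  # merged checkpoint under test
    dtype="float16",                    # dtype=torch.float16 in the log
    max_model_len=2048,                 # max_seq_len=2048
    tensor_parallel_size=4,             # four VllmWorker ranks below
    enable_prefix_caching=True,
    enforce_eager=False,                # keeps torch.compile + CUDA graphs on
)
```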
+INFO 07-09 15:28:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_b713fbf6'), local_subscribe_addr='ipc:///tmp/5fa42554-0a29-4f00-8620-8315bdc6c4a9', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:28:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 15:28:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9162c84c'), local_subscribe_addr='ipc:///tmp/7278b4de-3cb1-4bf2-a4bd-bb1bb1ec088a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:28:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c8f39815'), local_subscribe_addr='ipc:///tmp/d264210a-530b-4c3b-8c82-19cc5d29b705', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:28:12 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_55ba13a0'), local_subscribe_addr='ipc:///tmp/d96a3798-eba4-418e-aa10-21dd1cfa2c35', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:12 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1b4f38d3'), local_subscribe_addr='ipc:///tmp/ddc6c897-ec96-4181-ba06-8d5ca28015ff', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:13 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:13 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:13 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:13 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:13 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:13 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:13 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:13 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=808804) WARNING 07-09 15:28:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=808803) WARNING 07-09 15:28:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=808801) WARNING 07-09 15:28:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=808802) WARNING 07-09 15:28:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_e8a1d404'), local_subscribe_addr='ipc:///tmp/78df3e9a-c80b-4d15-ad53-83e7419588ae', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:14 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:14 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:14 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:14 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:14 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:14 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=808804) WARNING 07-09 15:28:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=808803) WARNING 07-09 15:28:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:14 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:14 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=808801) WARNING 07-09 15:28:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=808802) WARNING 07-09 15:28:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:14 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_3...
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:14 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_3...
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:14 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_3...
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:14 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_3...
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:39 [loader.py:458] Loading weights took 24.49 seconds
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:39 [loader.py:458] Loading weights took 24.56 seconds
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:39 [loader.py:458] Loading weights took 24.56 seconds
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:39 [loader.py:458] Loading weights took 24.59 seconds
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:39 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 24.741471 seconds
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:39 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 24.835222 seconds
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:39 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 24.821693 seconds
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:39 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 24.834526 seconds
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ff68b32485/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:49 [backends.py:430] Dynamo bytecode transform time: 10.03 s
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ff68b32485/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:49 [backends.py:430] Dynamo bytecode transform time: 10.12 s
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ff68b32485/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:49 [backends.py:430] Dynamo bytecode transform time: 10.19 s
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:50 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/ff68b32485/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:50 [backends.py:430] Dynamo bytecode transform time: 10.37 s
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:28:53 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:28:53 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:28:53 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:28:53 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:29:26 [backends.py:148] Compiling a graph for general shape takes 36.12 s
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:29:26 [backends.py:148] Compiling a graph for general shape takes 36.06 s
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:29:27 [backends.py:148] Compiling a graph for general shape takes 36.33 s
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:29:27 [backends.py:148] Compiling a graph for general shape takes 36.56 s
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:29:52 [monitor.py:33] torch.compile takes 46.15 s in total
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:29:52 [monitor.py:33] torch.compile takes 46.75 s in total
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:29:52 [monitor.py:33] torch.compile takes 46.70 s in total
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:29:52 [monitor.py:33] torch.compile takes 46.17 s in total
+INFO 07-09 15:29:54 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 15:29:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x
+INFO 07-09 15:29:54 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:29:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:29:54 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:29:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:29:54 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens
+INFO 07-09 15:29:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x
+(VllmWorker rank=3 pid=808804) INFO 07-09 15:30:31 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 3.55 GiB
+(VllmWorker rank=2 pid=808803) INFO 07-09 15:30:31 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 3.55 GiB
+(VllmWorker rank=1 pid=808802) INFO 07-09 15:30:31 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 3.55 GiB
+(VllmWorker rank=0 pid=808801) INFO 07-09 15:30:31 [gpu_model_runner.py:1686] Graph capturing finished in 38 secs, took 3.55 GiB
+INFO 07-09 15:30:32 [core.py:159] init engine (profile, create kv cache, warmup model) took 112.27 seconds
+INFO 07-09 15:30:32 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 15:36:39 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 15:36:39 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.7563|± |0.0137|
+| | |math_pass@1:1_samples|0.9844|± |0.0040|
+|mm\|arc_challenge\|0| 0|sem |0.9774|± |0.0074|
+|mm\|arc_easy\|0 | 0|sem |0.9907|± |0.0031|
+|mm\|commonsenseqa\|0| 0|sem |0.9284|± |0.0141|
+|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9688|± |0.0079|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|truthfulqa\|0 | 0|sem |0.8850|± |0.0301|
+
diff --git a/merge_qwen/logs/qwen_ties_5.log b/merge_qwen/logs/qwen_ties_5.log
new file mode 100644
index 0000000000000000000000000000000000000000..0d2b755f23160e007f229f01df629c4492029151
--- /dev/null
+++ b/merge_qwen/logs/qwen_ties_5.log
@@ -0,0 +1,100 @@
+INFO 07-09 15:36:38 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 15:36:47 [config.py:717] This model supports multiple tasks: {'reward', 'embed', 'generate', 'score', 'classify'}. Defaulting to 'generate'.
+INFO 07-09 15:36:47 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 15:36:47 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 15:36:49 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_ties_5', speculative_config=None, tokenizer='./merged_qwen/qwen_ties_5', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_ties_5, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 15:36:49 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 15:36:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_2b15b0a0'), local_subscribe_addr='ipc:///tmp/40fd9540-28ef-4ac2-a7af-c48410622b04', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:36:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:36:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_148986ac'), local_subscribe_addr='ipc:///tmp/e6919a49-52f5-40a1-82f6-1f919bd7d81b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:36:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:36:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4f49578b'), local_subscribe_addr='ipc:///tmp/65491d4f-7f7a-411e-ad50-6e809242ee8b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:36:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 15:36:49 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:36:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_64d602c7'), local_subscribe_addr='ipc:///tmp/bf89f477-1514-4297-bc28-3bc2949b9171', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:36:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_26a0694e'), local_subscribe_addr='ipc:///tmp/022b9aa5-c399-4039-96de-72c1150d2ee8', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:36:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:36:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:36:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:36:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:36:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:36:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:36:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:36:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=814379) WARNING 07-09 15:36:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=814380) WARNING 07-09 15:36:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=814378) WARNING 07-09 15:36:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=814377) WARNING 07-09 15:36:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:36:51 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_c760a404'), local_subscribe_addr='ipc:///tmp/f67d9440-6fe7-421d-9e5d-8214973839dc', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:36:51 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:36:51 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:36:51 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:36:51 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:36:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:36:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=814379) WARNING 07-09 15:36:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:36:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:36:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=814380) WARNING 07-09 15:36:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=814377) WARNING 07-09 15:36:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=814378) WARNING 07-09 15:36:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:36:51 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_5...
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:36:51 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_5...
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:36:51 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_5...
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:36:51 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_5...
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:37:17 [loader.py:458] Loading weights took 26.33 seconds
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:37:17 [loader.py:458] Loading weights took 26.42 seconds
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:37:17 [loader.py:458] Loading weights took 26.39 seconds
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:37:17 [loader.py:458] Loading weights took 26.39 seconds
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:37:18 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.573838 seconds
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:37:18 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.663953 seconds
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:37:18 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.664521 seconds
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:37:18 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 26.657965 seconds
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:37:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c6f23e9825/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:37:27 [backends.py:430] Dynamo bytecode transform time: 9.74 s
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:37:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c6f23e9825/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:37:27 [backends.py:430] Dynamo bytecode transform time: 9.78 s
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:37:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c6f23e9825/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:37:27 [backends.py:430] Dynamo bytecode transform time: 9.81 s
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:37:27 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/c6f23e9825/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:37:27 [backends.py:430] Dynamo bytecode transform time: 9.81 s
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:37:31 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:37:31 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:37:31 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:37:31 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:38:04 [backends.py:148] Compiling a graph for general shape takes 36.13 s
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:38:04 [backends.py:148] Compiling a graph for general shape takes 36.27 s
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:38:04 [backends.py:148] Compiling a graph for general shape takes 36.37 s
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:38:05 [backends.py:148] Compiling a graph for general shape takes 36.61 s
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:38:30 [monitor.py:33] torch.compile takes 45.87 s in total
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:38:30 [monitor.py:33] torch.compile takes 46.05 s in total
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:38:30 [monitor.py:33] torch.compile takes 46.42 s in total
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:38:30 [monitor.py:33] torch.compile takes 46.18 s in total
+INFO 07-09 15:38:32 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 15:38:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x
+INFO 07-09 15:38:32 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:38:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:38:32 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:38:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:38:32 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens
+INFO 07-09 15:38:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x
+(VllmWorker rank=3 pid=814380) INFO 07-09 15:39:23 [gpu_model_runner.py:1686] Graph capturing finished in 52 secs, took 3.55 GiB
+(VllmWorker rank=1 pid=814378) INFO 07-09 15:39:23 [gpu_model_runner.py:1686] Graph capturing finished in 52 secs, took 3.55 GiB
+(VllmWorker rank=2 pid=814379) INFO 07-09 15:39:23 [gpu_model_runner.py:1686] Graph capturing finished in 52 secs, took 3.55 GiB
+(VllmWorker rank=0 pid=814377) INFO 07-09 15:39:24 [gpu_model_runner.py:1686] Graph capturing finished in 52 secs, took 3.55 GiB
+INFO 07-09 15:39:24 [core.py:159] init engine (profile, create kv cache, warmup model) took 126.26 seconds
+INFO 07-09 15:39:24 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 15:46:20 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 15:46:21 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.9604|± |0.0131|
+| | |math_pass@1:1_samples|0.9792|± |0.0046|
+|mm\|arc_challenge\|0| 0|sem |0.9674|± |0.0089|
+|mm\|arc_easy\|0 | 0|sem |0.9886|± |0.0034|
+|mm\|commonsenseqa\|0| 0|sem |0.9254|± |0.0144|
+|mm\|gpqa_diamond\|0 | 2|sem |1.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9584|± |0.0091|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|truthfulqa\|0 | 0|sem |0.9204|± |0.0256|
+
diff --git a/merge_qwen/logs/qwen_ties_7.log b/merge_qwen/logs/qwen_ties_7.log
new file mode 100644
index 0000000000000000000000000000000000000000..4c95d3c23cb1656ef4e357730f797be29d9664eb
--- /dev/null
+++ b/merge_qwen/logs/qwen_ties_7.log
@@ -0,0 +1,100 @@
+INFO 07-09 15:46:19 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 15:46:28 [config.py:717] This model supports multiple tasks: {'reward', 'embed', 'generate', 'score', 'classify'}. Defaulting to 'generate'.
+INFO 07-09 15:46:28 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 15:46:28 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
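Across these per-run tables, the |all| row is consistent with a plain unweighted mean of the per-task values for each metric; for qwen_ties_5 above, (0.9674 + 0.9886 + 0.9254 + 1.0000 + 0.9204) / 5 = 0.9604 for sem and (0.9584 + 1.0000) / 2 = 0.9792 for math_pass@1:1_samples. A quick check in Python, assuming this averaging rule, which the logs themselves do not state:

```python
# Verify the |all| row against the per-task scores from the qwen_ties_5 table.
# The unweighted-mean aggregation is our inference, not documented in the log.
sem = [0.9674, 0.9886, 0.9254, 1.0000, 0.9204]  # arc_challenge, arc_easy, commonsenseqa, gpqa_diamond, truthfulqa
math = [0.9584, 1.0000]                          # gsm8k, math_500

print(round(sum(sem) / len(sem), 4))    # 0.9604, matches |all| sem
print(round(sum(math) / len(math), 4))  # 0.9792, matches |all| math_pass@1:1_samples
```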
+INFO 07-09 15:46:29 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_ties_7', speculative_config=None, tokenizer='./merged_qwen/qwen_ties_7', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_ties_7, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 15:46:29 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 15:46:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_0f71d881'), local_subscribe_addr='ipc:///tmp/c5b72275-9a44-43c1-8344-0b78bb24dbe5', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:46:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 15:46:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8d8c0c0a'), local_subscribe_addr='ipc:///tmp/c34784c5-3fdc-4863-9d6b-1fdd5c05355b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bab3d690'), local_subscribe_addr='ipc:///tmp/8011842d-f803-4661-94c8-65f6b102606b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:46:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 15:46:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f41c65de'), local_subscribe_addr='ipc:///tmp/cfab6314-2b48-462d-9c59-d3a42f22690e', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4d8c73c2'), local_subscribe_addr='ipc:///tmp/8815c8d9-a1f7-401b-b7ef-dc12166bf672', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=820207) WARNING 07-09 15:46:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=820208) WARNING 07-09 15:46:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=820205) WARNING 07-09 15:46:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=820206) WARNING 07-09 15:46:32 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_ed7c33bc'), local_subscribe_addr='ipc:///tmp/e988ac8a-0053-4141-99f0-48cd277e0e79', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:32 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:32 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:32 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:32 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=820208) WARNING 07-09 15:46:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=820207) WARNING 07-09 15:46:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=820205) WARNING 07-09 15:46:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:32 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=820206) WARNING 07-09 15:46:32 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_7...
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_7...
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_7...
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:32 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_7...
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:57 [loader.py:458] Loading weights took 25.25 seconds
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:57 [loader.py:458] Loading weights took 25.42 seconds
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:57 [loader.py:458] Loading weights took 25.42 seconds
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:57 [loader.py:458] Loading weights took 25.42 seconds
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:46:57 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.515890 seconds
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:46:58 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.685134 seconds
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:46:58 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.685878 seconds
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:46:58 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 25.681933 seconds
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:47:08 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0267a34bea/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:47:08 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0267a34bea/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:47:08 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0267a34bea/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:47:08 [backends.py:430] Dynamo bytecode transform time: 10.16 s
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:47:08 [backends.py:430] Dynamo bytecode transform time: 10.15 s
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:47:08 [backends.py:430] Dynamo bytecode transform time: 10.16 s
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:47:08 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/0267a34bea/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:47:08 [backends.py:430] Dynamo bytecode transform time: 10.34 s
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:47:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:47:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:47:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:47:11 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:47:45 [backends.py:148] Compiling a graph for general shape takes 36.25 s
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:47:45 [backends.py:148] Compiling a graph for general shape takes 36.72 s
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:47:45 [backends.py:148] Compiling a graph for general shape takes 36.83 s
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:47:45 [backends.py:148] Compiling a graph for general shape takes 36.71 s
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:48:11 [monitor.py:33] torch.compile takes 46.99 s in total
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:48:11 [monitor.py:33] torch.compile takes 47.05 s in total
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:48:11 [monitor.py:33] torch.compile takes 46.88 s in total
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:48:11 [monitor.py:33] torch.compile takes 46.41 s in total
+INFO 07-09 15:48:12 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 15:48:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x
+INFO 07-09 15:48:12 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:48:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:48:12 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:48:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:48:12 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens
+INFO 07-09 15:48:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x
+(VllmWorker rank=0 pid=820205) INFO 07-09 15:48:46 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB
+(VllmWorker rank=3 pid=820208) INFO 07-09 15:48:46 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB
+(VllmWorker rank=2 pid=820207) INFO 07-09 15:48:46 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB
+(VllmWorker rank=1 pid=820206) INFO 07-09 15:48:46 [gpu_model_runner.py:1686] Graph capturing finished in 33 secs, took 3.55 GiB
+INFO 07-09 15:48:46 [core.py:159] init engine (profile, create kv cache, warmup model) took 108.12 seconds
+INFO 07-09 15:48:46 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 15:55:52 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 15:55:52 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.7570|± |0.0134|
+| | |math_pass@1:1_samples|0.9823|± |0.0042|
+|mm\|arc_challenge\|0| 0|sem |0.9724|± |0.0082|
+|mm\|arc_easy\|0 | 0|sem |0.9907|± |0.0031|
+|mm\|commonsenseqa\|0| 0|sem |0.9104|± |0.0156|
+|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9647|± |0.0084|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|truthfulqa\|0 | 0|sem |0.9115|± |0.0268|
+
diff --git a/merge_qwen/logs/qwen_ties_9.log b/merge_qwen/logs/qwen_ties_9.log
new file mode 100644
index 0000000000000000000000000000000000000000..7f2f4661862b8baee662a10fb12e5d2a25f93b42
--- /dev/null
+++ b/merge_qwen/logs/qwen_ties_9.log
@@ -0,0 +1,100 @@
+INFO 07-09 15:55:51 [__init__.py:239] Automatically detected platform cuda.
+INFO 07-09 15:56:00 [config.py:717] This model supports multiple tasks: {'classify', 'embed', 'score', 'generate', 'reward'}. Defaulting to 'generate'.
+INFO 07-09 15:56:00 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 07-09 15:56:00 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 07-09 15:56:01 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./merged_qwen/qwen_ties_9', speculative_config=None, tokenizer='./merged_qwen/qwen_ties_9', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.float16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./merged_qwen/qwen_ties_9, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 07-09 15:56:01 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 07-09 15:56:01 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_8dd1551b'), local_subscribe_addr='ipc:///tmp/47c21106-62f2-414c-a47f-184d958a88d2', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:56:01 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:01 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_536777b2'), local_subscribe_addr='ipc:///tmp/fd2d4fed-ae38-4acf-8fa7-ec3763681025', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:56:01 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:01 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6d49d7e7'), local_subscribe_addr='ipc:///tmp/60d2b0de-85cf-4938-a5ce-93508b340720', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 07-09 15:56:01 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 07-09 15:56:01 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:01 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a5364c06'), local_subscribe_addr='ipc:///tmp/45a5de53-ce90-4a3a-89d0-605c7095a74c', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:01 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8705ccb4'), local_subscribe_addr='ipc:///tmp/3bc17621-6cb0-4258-921e-8c801a2e4ea3', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:03 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:03 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:03 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:03 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:03 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:03 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:03 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:03 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=826007) WARNING 07-09 15:56:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=826008) WARNING 07-09 15:56:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=826005) WARNING 07-09 15:56:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=826006) WARNING 07-09 15:56:03 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_8d9278f5'), local_subscribe_addr='ipc:///tmp/20e2a812-bb78-4c8e-b01c-08ab5506bc85', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:03 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:03 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:03 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:03 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:03 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:03 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=826007) WARNING 07-09 15:56:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=826008) WARNING 07-09 15:56:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:03 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=826005) WARNING 07-09 15:56:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:03 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=826006) WARNING 07-09 15:56:03 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:03 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_9...
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:03 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_9...
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:03 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_9...
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:03 [gpu_model_runner.py:1329] Starting to load model ./merged_qwen/qwen_ties_9...
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:26 [loader.py:458] Loading weights took 22.55 seconds
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:26 [loader.py:458] Loading weights took 22.64 seconds
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:26 [loader.py:458] Loading weights took 22.69 seconds
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:26 [loader.py:458] Loading weights took 22.67 seconds
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:26 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 22.821507 seconds
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:27 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 22.925357 seconds
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:27 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 22.939005 seconds
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:27 [gpu_model_runner.py:1347] Model loading took 6.9652 GiB and 22.903687 seconds
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:36 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/db2a0fd13d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:36 [backends.py:430] Dynamo bytecode transform time: 9.78 s
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:36 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/db2a0fd13d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:36 [backends.py:430] Dynamo bytecode transform time: 9.79 s
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/db2a0fd13d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:37 [backends.py:430] Dynamo bytecode transform time: 9.85 s
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/db2a0fd13d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:37 [backends.py:430] Dynamo bytecode transform time: 9.96 s
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:56:40 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:56:40 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:56:40 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:56:40 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:57:13 [backends.py:148] Compiling a graph for general shape takes 35.52 s
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:57:13 [backends.py:148] Compiling a graph for general shape takes 35.66 s
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:57:13 [backends.py:148] Compiling a graph for general shape takes 35.94 s
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:57:13 [backends.py:148] Compiling a graph for general shape takes 36.08 s
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:57:39 [monitor.py:33] torch.compile takes 46.04 s in total
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:57:39 [monitor.py:33] torch.compile takes 45.79 s in total
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:57:39 [monitor.py:33] torch.compile takes 45.44 s in total
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:57:39 [monitor.py:33] torch.compile takes 45.31 s in total
+INFO 07-09 15:57:40 [kv_cache_utils.py:634] GPU KV cache size: 1,256,992 tokens
+INFO 07-09 15:57:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.77x
+INFO 07-09 15:57:40 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:57:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:57:40 [kv_cache_utils.py:634] GPU KV cache size: 1,256,832 tokens
+INFO 07-09 15:57:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 613.69x
+INFO 07-09 15:57:40 [kv_cache_utils.py:634] GPU KV cache size: 1,257,680 tokens
+INFO 07-09 15:57:40 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 614.10x
+(VllmWorker rank=1 pid=826006) INFO 07-09 15:58:16 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+(VllmWorker rank=2 pid=826007) INFO 07-09 15:58:16 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+(VllmWorker rank=0 pid=826005) INFO 07-09 15:58:17 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+(VllmWorker rank=3 pid=826008) INFO 07-09 15:58:17 [gpu_model_runner.py:1686] Graph capturing finished in 36 secs, took 3.55 GiB
+INFO 07-09 15:58:17 [core.py:159] init engine (profile, create kv cache, warmup model) took 110.04 seconds
+INFO 07-09 15:58:17 [core_client.py:439] Core engine process 0 ready.
+INFO 07-09 16:05:22 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:05:22 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.7541|± |0.0139|
+| | |math_pass@1:1_samples|0.9802|± |0.0044|
+|mm\|arc_challenge\|0| 0|sem |0.9724|± |0.0082|
+|mm\|arc_easy\|0 | 0|sem |0.9938|± |0.0025|
+|mm\|commonsenseqa\|0| 0|sem |0.9194|± |0.0149|
+|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9605|± |0.0089|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|truthfulqa\|0 | 0|sem |0.8850|± |0.0301|
+
diff --git a/merge_qwen/logs/show_results.log b/merge_qwen/logs/show_results.log
new file mode 100644
index 0000000000000000000000000000000000000000..99c962b0bab86bc6cc208dcedf8650b8a71a8c7f
--- /dev/null
+++ b/merge_qwen/logs/show_results.log
@@ -0,0 +1,137 @@
+| Task |Version| Metric | Model |Value | |Stderr|
+|--------------------|------:|---------------------|--------------------------------|-----:|---|-----:|
+|mm\|arc_challenge_c\|0| 0|em |._models_Qwen2.5-14B |0.9674|± |0.0089|
+| | |pem |._models_Qwen2.5-14B |0.9674|± |0.0089|
+| | |pqem |._models_Qwen2.5-14B |0.9749|± |0.0078|
+| | |qem |._models_Qwen2.5-14B |0.9674|± |0.0089|
+|mm\|arc_challenge\|0 | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_linear_1 |0.9524|± |0.0107|
+| | | |._merged_qwen_qwen_linear_3 |0.9749|± |0.0078|
+| | | |._merged_qwen_qwen_linear_5 |0.9799|± |0.0070|
+| | | |._merged_qwen_qwen_linear_7 |0.9799|± |0.0070|
+| | | |._merged_qwen_qwen_linear_9 |0.9749|± |0.0078|
+| | | |._merged_qwen_qwen_ties_1 |0.9724|± |0.0082|
+| | | |._merged_qwen_qwen_ties_3 |0.9774|± |0.0074|
+| | | |._merged_qwen_qwen_ties_5 |0.9674|± |0.0089|
+| | | |._merged_qwen_qwen_ties_7 |0.9724|± |0.0082|
+| | | |._merged_qwen_qwen_ties_9 |0.9724|± |0.0082|
+| | | |._models_R1-Qwen2.5-14B |0.9825|± |0.0066|
+|mm\|arc_easy_c\|0 | 0|em |._models_Qwen2.5-14B |0.9959|± |0.0021|
+| | |pem |._models_Qwen2.5-14B |0.9959|± |0.0021|
+| | |pqem |._models_Qwen2.5-14B |0.9969|± |0.0018|
+| | |qem |._models_Qwen2.5-14B |0.9959|± |0.0021|
+|mm\|arc_easy\|0 | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_linear_1 |0.9855|± |0.0038|
+| | | |._merged_qwen_qwen_linear_3 |0.9959|± |0.0021|
+| | | |._merged_qwen_qwen_linear_5 |0.9948|± |0.0023|
+| | | |._merged_qwen_qwen_linear_7 |0.9969|± |0.0018|
+| | | |._merged_qwen_qwen_linear_9 |0.9959|± |0.0021|
+| | | |._merged_qwen_qwen_ties_1 |0.9938|± |0.0025|
+| | | |._merged_qwen_qwen_ties_3 |0.9907|± |0.0031|
+| | | |._merged_qwen_qwen_ties_5 |0.9886|± |0.0034|
+| | | |._merged_qwen_qwen_ties_7 |0.9907|± |0.0031|
+| | | |._merged_qwen_qwen_ties_9 |0.9938|± |0.0025|
+| | | |._models_R1-Qwen2.5-14B |1.0000|± |0.0000|
+|mm\|commonsenseqa_c\|0| 0|em |._models_Qwen2.5-14B |0.9343|± |0.0136|
+| | |pem |._models_Qwen2.5-14B |0.9343|± |0.0136|
+| | |pqem |._models_Qwen2.5-14B |0.9522|± |0.0117|
+| | |qem |._models_Qwen2.5-14B |0.9343|± |0.0136|
+|mm\|commonsenseqa\|0 | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000|
+| | | |._merged_qwen_qwen_linear_1 |0.9284|± |0.0141|
+| | | |._merged_qwen_qwen_linear_3 |0.9284|± |0.0141|
+| | | |._merged_qwen_qwen_linear_5 |0.9134|± |0.0154|
+| | | |._merged_qwen_qwen_linear_7 |0.9254|± |0.0144|
+| | | |._merged_qwen_qwen_linear_9 |0.9254|± |0.0144|
+| | | |._merged_qwen_qwen_ties_1 |0.9015|± |0.0163|
+| | | |._merged_qwen_qwen_ties_3 |0.9284|± |0.0141|
+| | | |._merged_qwen_qwen_ties_5 |0.9254|± |0.0144|
+| | | |._merged_qwen_qwen_ties_7 |0.9104|± |0.0156|
+| | | |._merged_qwen_qwen_ties_9 |0.9194|± |0.0149|
+| | | |._models_R1-Qwen2.5-14B |0.9433|± |0.0127|
+|mm\|gpqa_diamond_c\|0 | 1|em |._models_Qwen2.5-14B |0.0000| | |
+| | |pem |._models_Qwen2.5-14B |0.0000| | |
+| | |pqem |._models_Qwen2.5-14B |0.0000| | |
+| | |qem |._models_Qwen2.5-14B |0.0000| | |
+|mm\|gpqa_diamond\|0 | 2|sem |._merged_qwen_qwen_dare_linear_1|0.0000| | |
+| | | |._merged_qwen_qwen_dare_linear_3|0.0000| | |
+| | | |._merged_qwen_qwen_dare_linear_5|0.0000| | |
+| | | |._merged_qwen_qwen_dare_linear_7|0.0000| | |
+| | | |._merged_qwen_qwen_dare_linear_9|0.0000| | |
+| | | |._merged_qwen_qwen_linear_1 |0.0000| | |
+| | | |._merged_qwen_qwen_linear_3 |0.0000| | |
+| | | |._merged_qwen_qwen_linear_5 |0.0000| | |
+| | | |._merged_qwen_qwen_linear_7 |0.0000| | |
+| | | |._merged_qwen_qwen_linear_9 |0.0000| | |
+| | | |._merged_qwen_qwen_ties_1 |0.0000| | |
+| | | |._merged_qwen_qwen_ties_3 |0.0000| | |
+| | | |._merged_qwen_qwen_ties_5 |1.0000| | |
+| | | |._merged_qwen_qwen_ties_7 |0.0000| | |
+| | | |._merged_qwen_qwen_ties_9 |0.0000| | |
+| | | |._models_R1-Qwen2.5-14B |0.0000| | |
+|mm\|gsm8k_c\|0 | 0|math_pass@1:1_samples|._models_Qwen2.5-14B |0.9002|± |0.0137|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000|
+| | |
|._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |0.9439|± |0.0105| +| | | |._merged_qwen_qwen_linear_3 |0.9584|± |0.0091| +| | | |._merged_qwen_qwen_linear_5 |0.9667|± |0.0082| +| | | |._merged_qwen_qwen_linear_7 |0.9647|± |0.0084| +| | | |._merged_qwen_qwen_linear_9 |0.9501|± |0.0099| +| | | |._merged_qwen_qwen_ties_1 |0.9480|± |0.0101| +| | | |._merged_qwen_qwen_ties_3 |0.9688|± |0.0079| +| | | |._merged_qwen_qwen_ties_5 |0.9584|± |0.0091| +| | | |._merged_qwen_qwen_ties_7 |0.9647|± |0.0084| +| | | |._merged_qwen_qwen_ties_9 |0.9605|± |0.0089| +| | | |._models_R1-Qwen2.5-14B |0.9667|± |0.0082| +|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|._models_Qwen2.5-14B |0.8750|± |0.0446| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_3 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_5 |0.9821|± |0.0179| +| | | |._merged_qwen_qwen_linear_7 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_9 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_1 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_3 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_5 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_7 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_9 |1.0000|± |0.0000| +| | | |._models_R1-Qwen2.5-14B |1.0000|± |0.0000| +|mm\|truthfulqa_c\|0 | 0|em |._models_Qwen2.5-14B |0.9204|± |0.0256| +| | |pem |._models_Qwen2.5-14B |0.9204|± |0.0256| +| | |pqem |._models_Qwen2.5-14B |0.9204|± |0.0256| +| | |qem |._models_Qwen2.5-14B |0.9204|± |0.0256| +|mm\|truthfulqa\|0 | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |0.8319|± |0.0353| +| | | |._merged_qwen_qwen_linear_3 |0.8761|± |0.0311| +| | | |._merged_qwen_qwen_linear_5 |0.9027|± |0.0280| +| | | |._merged_qwen_qwen_linear_7 |0.9115|± |0.0268| +| | | |._merged_qwen_qwen_linear_9 |0.9204|± |0.0256| +| | | |._merged_qwen_qwen_ties_1 |0.8673|± |0.0321| +| | | |._merged_qwen_qwen_ties_3 |0.8850|± |0.0301| +| | | |._merged_qwen_qwen_ties_5 |0.9204|± |0.0256| +| | | |._merged_qwen_qwen_ties_7 |0.9115|± |0.0268| +| | | |._merged_qwen_qwen_ties_9 |0.8850|± |0.0301| +| | | |._models_R1-Qwen2.5-14B |0.9204|± |0.0256| + diff --git a/merge_qwen/logs/show_results1.log b/merge_qwen/logs/show_results1.log new file mode 100644 index 0000000000000000000000000000000000000000..e55a267f52d2d9241151aa8f3afa72f533d0cea0 --- /dev/null +++ b/merge_qwen/logs/show_results1.log @@ -0,0 +1,105 @@ +| Task |Version| Metric | Model |Value | |Stderr| +|--------------------|------:|---------------------|--------------------------------|-----:|---|-----:| +|arc_challenge | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | 
|._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |0.9524|± |0.0107| +| | | |._merged_qwen_qwen_linear_3 |0.9749|± |0.0078| +| | | |._merged_qwen_qwen_linear_5 |0.9799|± |0.0070| +| | | |._merged_qwen_qwen_linear_7 |0.9799|± |0.0070| +| | | |._merged_qwen_qwen_linear_9 |0.9749|± |0.0078| +| | | |._merged_qwen_qwen_ties_1 |0.9724|± |0.0082| +| | | |._merged_qwen_qwen_ties_3 |0.9774|± |0.0074| +| | | |._merged_qwen_qwen_ties_5 |0.9674|± |0.0089| +| | | |._merged_qwen_qwen_ties_7 |0.9724|± |0.0082| +| | | |._merged_qwen_qwen_ties_9 |0.9724|± |0.0082| +| | | |._models_R1-Qwen2.5-14B |0.9825|± |0.0066| +| | | |._models_Qwen2.5-14B |0.9674|± |0.0089| +|arc_easy | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |0.9855|± |0.0038| +| | | |._merged_qwen_qwen_linear_3 |0.9959|± |0.0021| +| | | |._merged_qwen_qwen_linear_5 |0.9948|± |0.0023| +| | | |._merged_qwen_qwen_linear_7 |0.9969|± |0.0018| +| | | |._merged_qwen_qwen_linear_9 |0.9959|± |0.0021| +| | | |._merged_qwen_qwen_ties_1 |0.9938|± |0.0025| +| | | |._merged_qwen_qwen_ties_3 |0.9907|± |0.0031| +| | | |._merged_qwen_qwen_ties_5 |0.9886|± |0.0034| +| | | |._merged_qwen_qwen_ties_7 |0.9907|± |0.0031| +| | | |._merged_qwen_qwen_ties_9 |0.9938|± |0.0025| +| | | |._models_R1-Qwen2.5-14B |1.0000|± |0.0000| +| | | |._models_Qwen2.5-14B |0.9959|± |0.0021| +|commonsenseqa | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |0.9284|± |0.0141| +| | | |._merged_qwen_qwen_linear_3 |0.9284|± |0.0141| +| | | |._merged_qwen_qwen_linear_5 |0.9134|± |0.0154| +| | | |._merged_qwen_qwen_linear_7 |0.9254|± |0.0144| +| | | |._merged_qwen_qwen_linear_9 |0.9254|± |0.0144| +| | | |._merged_qwen_qwen_ties_1 |0.9015|± |0.0163| +| | | |._merged_qwen_qwen_ties_3 |0.9284|± |0.0141| +| | | |._merged_qwen_qwen_ties_5 |0.9254|± |0.0144| +| | | |._merged_qwen_qwen_ties_7 |0.9104|± |0.0156| +| | | |._merged_qwen_qwen_ties_9 |0.9194|± |0.0149| +| | | |._models_R1-Qwen2.5-14B |0.9433|± |0.0127| +| | | |._models_Qwen2.5-14B |0.9343|± |0.0136| +|gsm8k | 0|math_pass@1:1_samples|._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |0.9439|± |0.0105| +| | | |._merged_qwen_qwen_linear_3 |0.9584|± |0.0091| +| | | |._merged_qwen_qwen_linear_5 |0.9667|± |0.0082| +| | | |._merged_qwen_qwen_linear_7 |0.9647|± |0.0084| +| | | |._merged_qwen_qwen_linear_9 |0.9501|± |0.0099| +| | | |._merged_qwen_qwen_ties_1 |0.9480|± |0.0101| +| | | |._merged_qwen_qwen_ties_3 |0.9688|± |0.0079| +| | | |._merged_qwen_qwen_ties_5 |0.9584|± |0.0091| +| | | |._merged_qwen_qwen_ties_7 |0.9647|± |0.0084| +| | | |._merged_qwen_qwen_ties_9 
|0.9605|± |0.0089| +| | | |._models_R1-Qwen2.5-14B |0.9667|± |0.0082| +| | | |._models_Qwen2.5-14B |0.9002|± |0.0137| +|math_500 | 3|math_pass@1:1_samples|._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_3 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_5 |0.9821|± |0.0179| +| | | |._merged_qwen_qwen_linear_7 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_9 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_1 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_3 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_5 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_7 |1.0000|± |0.0000| +| | | |._merged_qwen_qwen_ties_9 |1.0000|± |0.0000| +| | | |._models_R1-Qwen2.5-14B |1.0000|± |0.0000| +| | | |._models_Qwen2.5-14B |0.8750|± |0.0446| +|truthfulqa | 0|sem |._merged_qwen_qwen_dare_linear_1|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_3|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_5|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_7|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_dare_linear_9|0.0000|± |0.0000| +| | | |._merged_qwen_qwen_linear_1 |0.8319|± |0.0353| +| | | |._merged_qwen_qwen_linear_3 |0.8761|± |0.0311| +| | | |._merged_qwen_qwen_linear_5 |0.9027|± |0.0280| +| | | |._merged_qwen_qwen_linear_7 |0.9115|± |0.0268| +| | | |._merged_qwen_qwen_linear_9 |0.9204|± |0.0256| +| | | |._merged_qwen_qwen_ties_1 |0.8673|± |0.0321| +| | | |._merged_qwen_qwen_ties_3 |0.8850|± |0.0301| +| | | |._merged_qwen_qwen_ties_5 |0.9204|± |0.0256| +| | | |._merged_qwen_qwen_ties_7 |0.9115|± |0.0268| +| | | |._merged_qwen_qwen_ties_9 |0.8850|± |0.0301| +| | | |._models_R1-Qwen2.5-14B |0.9204|± |0.0256| +| | | |._models_Qwen2.5-14B |0.9204|± |0.0256| + diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a0ed33061a3482567718c9ce0a7952665660f1a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f951c39388338909084b7f16a63a5db25e7821d81e36762cc2a30ae06c8932d7 +size 4016455 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4b9afa2f8936218a903dd8c776300c413bb82b3e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d71117467353d91b79344ec13600454990ad3a8520c22aa0a28ad325c699ab5 +size 9106698 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cf32721c7312f60ef2358d20ebf2d78d97445d3d --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e554f34cf939550af7f5a593a992d6f5c8549c575b4f97477694b2c2575d97d5 +size 3603640 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3942b11794dc523d4abad57d758ead3c377b2edd --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da73ba6a6dc25c75fb4646736f989bc1ffc736145eab29588d15d24833d66c64 +size 40500 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..421690c177f7730af98c545b825dcf805137e5dc --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f002fd2b9549811a89b58583a36374cec1d3b7b9d97ce1d0a5b7428515b221c +size 4374828 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e02c8bdaa4551738190d37bf3819a12cb2418b23 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c0f3fa81b303a71386ff26518f08b65a47cc3eec1018cd9a5bd3c6075858f9 +size 610964 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e376005604dcd254f40acf0e505e8177b7073ea --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac9b2b6c2756e996bcbd8c76059c2260f9650b833d5f281b8aea597534a9a4e +size 1254871 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a0ed33061a3482567718c9ce0a7952665660f1a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f951c39388338909084b7f16a63a5db25e7821d81e36762cc2a30ae06c8932d7 +size 4016455 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4b9afa2f8936218a903dd8c776300c413bb82b3e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d71117467353d91b79344ec13600454990ad3a8520c22aa0a28ad325c699ab5 +size 9106698 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cf32721c7312f60ef2358d20ebf2d78d97445d3d --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e554f34cf939550af7f5a593a992d6f5c8549c575b4f97477694b2c2575d97d5 +size 3603640 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3942b11794dc523d4abad57d758ead3c377b2edd --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da73ba6a6dc25c75fb4646736f989bc1ffc736145eab29588d15d24833d66c64 +size 40500 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..421690c177f7730af98c545b825dcf805137e5dc --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f002fd2b9549811a89b58583a36374cec1d3b7b9d97ce1d0a5b7428515b221c +size 4374828 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e02c8bdaa4551738190d37bf3819a12cb2418b23 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c0f3fa81b303a71386ff26518f08b65a47cc3eec1018cd9a5bd3c6075858f9 +size 610964 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e376005604dcd254f40acf0e505e8177b7073ea --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac9b2b6c2756e996bcbd8c76059c2260f9650b833d5f281b8aea597534a9a4e +size 1254871 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a0ed33061a3482567718c9ce0a7952665660f1a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f951c39388338909084b7f16a63a5db25e7821d81e36762cc2a30ae06c8932d7 +size 4016455 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4b9afa2f8936218a903dd8c776300c413bb82b3e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d71117467353d91b79344ec13600454990ad3a8520c22aa0a28ad325c699ab5 +size 9106698 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cf32721c7312f60ef2358d20ebf2d78d97445d3d --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e554f34cf939550af7f5a593a992d6f5c8549c575b4f97477694b2c2575d97d5 +size 3603640 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3942b11794dc523d4abad57d758ead3c377b2edd --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da73ba6a6dc25c75fb4646736f989bc1ffc736145eab29588d15d24833d66c64 +size 40500 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..421690c177f7730af98c545b825dcf805137e5dc --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f002fd2b9549811a89b58583a36374cec1d3b7b9d97ce1d0a5b7428515b221c +size 4374828 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e02c8bdaa4551738190d37bf3819a12cb2418b23 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c0f3fa81b303a71386ff26518f08b65a47cc3eec1018cd9a5bd3c6075858f9 +size 610964 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e376005604dcd254f40acf0e505e8177b7073ea --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac9b2b6c2756e996bcbd8c76059c2260f9650b833d5f281b8aea597534a9a4e +size 1254871 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a0ed33061a3482567718c9ce0a7952665660f1a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f951c39388338909084b7f16a63a5db25e7821d81e36762cc2a30ae06c8932d7 +size 4016455 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4b9afa2f8936218a903dd8c776300c413bb82b3e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d71117467353d91b79344ec13600454990ad3a8520c22aa0a28ad325c699ab5 +size 9106698 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cf32721c7312f60ef2358d20ebf2d78d97445d3d --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e554f34cf939550af7f5a593a992d6f5c8549c575b4f97477694b2c2575d97d5 +size 3603640 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3942b11794dc523d4abad57d758ead3c377b2edd --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da73ba6a6dc25c75fb4646736f989bc1ffc736145eab29588d15d24833d66c64 +size 40500 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..421690c177f7730af98c545b825dcf805137e5dc --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f002fd2b9549811a89b58583a36374cec1d3b7b9d97ce1d0a5b7428515b221c +size 4374828 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e02c8bdaa4551738190d37bf3819a12cb2418b23 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c0f3fa81b303a71386ff26518f08b65a47cc3eec1018cd9a5bd3c6075858f9 +size 610964 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e376005604dcd254f40acf0e505e8177b7073ea --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac9b2b6c2756e996bcbd8c76059c2260f9650b833d5f281b8aea597534a9a4e +size 1254871 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a0ed33061a3482567718c9ce0a7952665660f1a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f951c39388338909084b7f16a63a5db25e7821d81e36762cc2a30ae06c8932d7 +size 4016455 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4b9afa2f8936218a903dd8c776300c413bb82b3e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d71117467353d91b79344ec13600454990ad3a8520c22aa0a28ad325c699ab5 +size 9106698 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cf32721c7312f60ef2358d20ebf2d78d97445d3d --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e554f34cf939550af7f5a593a992d6f5c8549c575b4f97477694b2c2575d97d5 +size 3603640 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3942b11794dc523d4abad57d758ead3c377b2edd --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da73ba6a6dc25c75fb4646736f989bc1ffc736145eab29588d15d24833d66c64 +size 40500 
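The vLLM startup log earlier in this diff records the engine configuration behind these runs: 4-way tensor parallelism ("rank 0..3 in world size 4 ... TP rank 0..3"), the Flash Attention backend on the V1 engine, a roughly 1.25M-token KV cache, and a warning that custom allreduce is disabled on PCIe-only GPUs. A minimal sketch of an equivalent offline launch is below; the model path, parallelism degree, and context length are read off the log, while the prompt and sampling settings are placeholders, not taken from these runs.

```python
# Sketch only: reconstructs the engine settings visible in the vLLM log above.
from vllm import LLM, SamplingParams

llm = LLM(
    model="./merged_qwen/qwen_ties_9",  # path from "Starting to load model ..."
    tensor_parallel_size=4,             # "world size 4 ... TP rank 0..3"
    disable_custom_all_reduce=True,     # silences the custom_all_reduce warning
    max_model_len=2048,                 # assumed from "2,048 tokens per request"
)
outputs = llm.generate(["2+2="], SamplingParams(temperature=0.0, max_tokens=8))
print(outputs[0].outputs[0].text)
```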
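The model names in the results tables above (qwen_linear_N, qwen_ties_N, qwen_dare_linear_N for N = 1..9) suggest linear, TIES, and DARE-linear merges of Qwen2.5-14B with R1-Qwen2.5-14B across a sweep of interpolation weights. The sketch below shows how such checkpoints could be produced from two state dicts, under assumptions the diff does not confirm: suffix N means a weight of N/10, and the TIES step is simplified to trimming alone, since sign election is trivial with a single donor model. It is an illustration of the two methods, not the scripts used for these runs.

```python
# Hypothetical reconstruction of the merge step; weights and setup are assumptions.
import torch

def linear_merge(base, donor, w):
    # theta = (1 - w) * base + w * donor, parameter-wise
    return {k: (1.0 - w) * base[k] + w * donor[k] for k in base}

def ties_merge(base, donor, w, density=0.2):
    # Single-donor TIES degenerates to trimming the task vector (donor - base)
    # to its top-`density` fraction of entries by magnitude, then scaling it
    # back onto the base model.
    merged = {}
    for k in base:
        delta = donor[k] - base[k]
        flat = delta.abs().flatten().float()
        keep = max(1, int(density * flat.numel()))
        thresh = flat.kthvalue(flat.numel() - keep + 1).values
        merged[k] = base[k] + w * torch.where(
            delta.abs() >= thresh, delta, torch.zeros_like(delta)
        )
    return merged
```

Under the suffix-as-weight reading, `ties_merge(base_sd, donor_sd, 0.9)` would correspond to the qwen_ties_9 checkpoint evaluated above.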
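The parquet entries in this diff are Git LFS pointer files rather than the data itself: three lines giving the spec version, a sha256 oid, and the byte size. Note that the dare_linear output stanzas repeat identical oids across all merge weights (e.g. f951c393... for every arc_challenge file), i.e. those runs wrote byte-identical outputs, consistent with their all-zero scores in the tables. A small sketch for validating a fetched file against its pointer follows; the function and paths are hypothetical, not part of this repository.

```python
# Sketch: check a downloaded file against the oid/size in its LFS pointer.
import hashlib
import os

def check_lfs_pointer(pointer_path, data_path):
    with open(pointer_path) as f:
        fields = dict(line.split(" ", 1) for line in f.read().splitlines() if line)
    oid = fields["oid"].strip().removeprefix("sha256:")
    size = int(fields["size"])
    with open(data_path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    return digest == oid and os.path.getsize(data_path) == size
```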
diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..421690c177f7730af98c545b825dcf805137e5dc --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f002fd2b9549811a89b58583a36374cec1d3b7b9d97ce1d0a5b7428515b221c +size 4374828 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e02c8bdaa4551738190d37bf3819a12cb2418b23 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c0f3fa81b303a71386ff26518f08b65a47cc3eec1018cd9a5bd3c6075858f9 +size 610964 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e376005604dcd254f40acf0e505e8177b7073ea --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_dare_linear_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac9b2b6c2756e996bcbd8c76059c2260f9650b833d5f281b8aea597534a9a4e +size 1254871 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..92b6487eec1a517a9f2e3d4a8f220e6f58e26fb1 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7f88856f4bd1aa4312c37be41a8ba5c8a5781568d141a1404af7da5c0053584 +size 4260300 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6fb76c7dfedbfed3dcabed52f431bd25340e0a5a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:278da53812dd6f60b1d9141456603a5490e4bef090ce8a65a1076a6ab165116b +size 9673191 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a68cf2a7cf1ac702d7c037ca621fd212063fe73c --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:882cb54216e584fe9d2cc972fca6aa3e2120a8ce539573280ff2b53b81edcb5d +size 3733259 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..267d44084b04271711128970054f1143a8bb1720 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56acb2594dff7ea83ac3bbf612a9befe74a7914fa970ccd7ab4769ee7b5bb844 +size 33704 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..07ec208b25bc8248ac54b107bafb278318f4f8ce --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8a4b4fb46904e9f61e268ce3190fdf22d3a0ebb1a05e24afcf2145cb4c5854a +size 4520983 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f2f04c255e5e53aa77c9c7f224fcca2d525a7594 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1656ecfc07ea30757369587277cea3d76fa2d356a0bd57374a143dca86e8fe6 +size 627966 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b0947e8029792e41a0821e94d29a41e0c3fc4064 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf9e8b1110992d05b73a667813c7040ce442c1b5793de57eae52169eb7fedd05 +size 1313028 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6a2a2e3f51fdc221de06807cf03d93080c524053 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91472943882bc8e894232b2f95ed830f06c58eaa587ca352c250ad2af2677030 +size 4312314 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2c77d09cc0594a72b8ec960748eb26f4fff52eee --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b8c13d5c7f39f10f1ff028f776365e70e470c41640bf34151de73f91321c91f +size 9814812 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..997e664c8a0bb5307236934ba5d07cf9d23e82ee --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24176e71d47728582dac59088bfb62695922d42650e04ff15f8e9bc04c595780 +size 3862785 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f5bd966811ba2cc801380b1588314095d1dfd79a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56929fa24021488479a49d522df714a573b70ff85025952c361fa091dc1ab619 +size 46286 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..13efa2d6cadc7989596606aa9ce65230af9c3f34 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23bc78c5d4e1a8390ccf65f91f365427ff545b9545d9020d0dffebbb265229b3 +size 4577043 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a23dfb9f267ed40dafb71a5787871b9f4d637f4f --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bafa7470c8357d4336e24f7403d0ba7cdcb2fdba11f9c2cd1317f17a9dca30d8 +size 633313 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7079ca33b73ae10d12ad1dce4b43987e7f2e7848 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e724063649b14e95f7937330d69c622050e71588ce9af470fde94dfaf7dfba77 +size 1336315 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4f4fbdbb64604d87673ae60f8867e54f947494e4 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e8bc0e5f08d4808b5c20fb3147b6412d69028d366b0d821fb5f82bfee4f35c3 +size 4434353 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f8b247b1e2c3b7be50e394209261f5e5f469b69a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ead8798a9cc778290df0d861df1cebdd460479cfe09672e8bf5c767f37469c48 +size 10082693 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b0cdcfe396470bc54e3f8626096d16be6b3f3489 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:614a9813315b40fb07ad49978cce22246a8e0550136abef0fdefd478ac39a4f4 +size 3923930 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..66965f5de32392d0fae4ebcebefeca55d7d39aa2 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b873ed2880f9b052f4848a55320e5d01edb3e5f31a6815516a811869bc1168bb +size 48487 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..06340b7e2a44e02305a2f0361d1ed3b28b7019a7 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fdf452d11e259fd1753787408a9365fe8d2b9e531ce3a9bfe35bdce14b2f517 +size 4616349 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d045c32bb45327d042b573abcdf1c2c705e349ec --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b015e8272ad2df1157e25ec7fe7d1a59d52a6f586c1270b9f2492a075433b68 +size 647359 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9548e41e6390e9b5f7eb6f38e6939d167c4b9387 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ca89604d456261dce28b08fef7052ce3b1609b37cbe1e4be227da2498102042 +size 1376008 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b7535df0905c44ca3be86c62833098d61e75048e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffcfd6bb67155abd98749fa48fcbe3aa19268a9a79daffb0761d9f49d46f0e5e +size 4486906 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ca24e15216167cc50c6806aa576292e9ff9a9c84 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d1aeb144b37fa9e14c75b54a4d9e7515a5da85878d6ceaf7a3b35a37abc65a3 +size 10207973 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fc467f5503dc7fa19e24f154fa751e364e22a3f8 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33df1628c6147ac1027b339bbbf9932878e31010e43711876bc021eb71e4f15a +size 3997497 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..601a39ee73546dac4615e7edf89092ff6a003064 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ed6f147679cba242f195feb12bd60102e95631890d1edb55c45e8ecb59618a0 +size 35269 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3f36d3e044b5fc94d9efa1da52a19a025a2e39c0 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ef767fc72d9474b6265dfd567d3d0941009bbb4f56eff55fdb5be5e9295e1de +size 4685309 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6bd3ddb440223a522980704b1e4328e71dc8398e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aac03513506892bbe5042f44bcdb79884dc225a722a824b95aac48d03017b26 +size 656913 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f2a4265b7a58e8a6ab15d67b5054c400d5993cb0 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09a5c98c15fb8ff9a208c0445ec60b0b0e0fe841426e0c34ff3f0d8ad4d1f5e6 +size 1405849 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d1b1454d7517463f542cd0de515c73a99fce773f --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0aa7c6e7c2bb059a1b758daa6ece5dd82ad8897595e22a35a93302904b48a32 +size 4548630 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c7236cab18d7e8c557cce7cccd575de70ffd000e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89b29fa6c6a0021f5e7db73742336491edae2a141d54fde2dee04272aa9397eb +size 10357559 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..359d01fc1ebb2946e734aa3b8f0517b9016a1378 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d893a7d113354a03563aa4ba208c17f5d75c36f90e733d591db33eefd4ed617 +size 4074367 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cbbc2cf49fccdfe6937eb5066689242a98f5318c --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5ad457b87a20d4cca28f43a56a816f51538707ca3e282152b9c3582824f2b7c +size 35412 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4a9a0eacd4e316c27c9085fd9101f8fc6e7ccd46 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8030e6f50e132e3e6fae95b271e20ea14a6f9a7fb73620a67703a3011de9fe25 +size 4770830 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a3b86eeabba1f5a0a2fcf3c345562b7bd5578f1d --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:927b5c407cc1fc5eeb0eca12a0d0006674a9894e6fae151bede15d43cac1bb23 +size 668276 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c35f33d4aafa75695484432b6952f979aec8d142 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_linear_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66830ec4eb9ad5131016ac73708e9fe3675e722ce2227a98b28a20e8c1e68e78 +size 1425521 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..182623f9f61191016890dd74776c32391aeecf24 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f6e40607dbe1404d87f3e492ef9160ceda17090bbcc89a11bf5c810f889c5b6 +size 4332232 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1cf5e66ae4d3fc865d725469dfbee2998e609135 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b8e72f06e0ef8c063dc9a47f522d2b8b50ca1f1745c92747fe01f87a8b5fd7d +size 9861400 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..855ef82cf40ab9ceaa3adb109112199715ffc5b5 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c03828d6d46bfe7c98519f43ff0fd33e6a34ac883f2a5be8c1233f0f2a783e60 +size 3832384 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3fbadd01ff2e01c74d367ee1615a56f11324e614 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cfcef04f92e37adce0db1c6b43fd2878397e23d3cfebae19109056e202248b7 +size 40541 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..df0246927dbdd890df8bd70efbda5c822d2b4d7a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07d41f5709f16b6fced43a4885f2e796749e14493b3cf79ad9792f5ec7396840 +size 4560950 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9c1643ef7a2ccc2ee722a4cfd07ce62c93b3bdb9 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb10cec5d9c21c6e58160a8cd78d8ed339c6e14abff6d047041c190918f15fab +size 641966 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4aa240b4b19ac3fad1e877c915531eaa031b00f3 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_1/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:102fb3ded3478cda3284bd787c8ab44cc85d5ead19dfffc2629a3d92adea2a05 +size 1347672 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..00c1a188a968e4b73bc5e3286e63cf88ac4113ab --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0b23382f5ac5c3650b9ce38cfb0a1c53a6e9fb79633c6d46ef90b6cd6d3c70f +size 4515688 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3ee898da7cc75a8e142660ec2a789019bc418195 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a47e0c2bbd7e4f463c8859537eafa9c21bd370c4127a5e46d11760ffa76e8375 +size 10286564 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a36708a537cce6a09774d0bd0374b6d9921499f8 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85ce480011ea52aacd25e0174c5f097bdefc5cf81c1119f8ad28ba47242c71bd +size 4013224 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c5d3292ae4f24663b629f40c9c9c7e67035c0435 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:842aa66180e78548ee02ccf4107b14e38dec7e4bc09575525751ed1d6e0e87a5 +size 49829 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6bb0114fe85d3cc068d1d83b736e1c52f2dc60e2 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad029ce82addd62363824018ec6fef98ee819a45f2f1ea1a7f0783139a0df168 +size 4743843 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b297f48d7f09b588a3bfec5d048dd81cc15f6326 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46ea91d7d0155cbbf13455a99686958bf905b096361c625fb5f2dffae7ae05a3 +size 661260 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..35295489485aa2bb05ef3bf6f5701e5b5b8433fe --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_3/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a5b60300bd894ec99fc7c817393797c82907cbd149a17e60c7269fe038ab32f2 +size 1404670 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5f36af55265d0aa29cc9f117a384ff9da9a3c51a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ef61dc221451022d4639778a06ada29154002ffe4b14bee6c772eb3e93c83ef +size 4588457 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fbdea6bb335d6f6839fee8740c53f58fcbf12c6e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f56e87372c1c9481f8123b4809d013788d7c5019288e89c88804287a72398c34 +size 10410973 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bb1cb7ad8fe2bb1ee3f74e2941a2c854ecc86a9c --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea1f4dda5150bbb38eea57dc46a7695690fb35a3a3d1f5694ede33baa2c3bad3 +size 4065737 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6412d5955e85c00452a4190eb61b67216ad7e646 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75d1a85d0c520faff88947c86672ac0e21323c716fd472be446667132e05edd8 +size 35151 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..46e596e8afbb9f477f93e82bf4719d91cd18df85 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b4bb1621dddfb2dc63e85685b726fe03c3141a2126d543ab4bb1ee2361aafa5 +size 4796452 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fe68a1328023e1d27ad26ce1e1be5778101f39eb --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c627d94bc0d4280e000682a12d8851e38c515cb9aaab5b015cc61aba0eb011c8 +size 669129 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1b494be95d24db3ef8512d7e03546398a339c85f --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_5/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29bfd50e72edee67c7f28c29fe6caa0fa4f301354cc63aafbb05eb9d3a4c0d70 +size 1429110 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f4d1c3668d77566e42d6254e869d881caae7b88a --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a38016201c4f2cadb6fb11cb26aba3fdd764675e5a5c81ea0b01e64c31a0d826 +size 4593243 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..84a363b62a84e4a6d3217beaf7f234617b5cbceb --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f2643aa0f6397509e2a645cb6b5e042ea397394fb5cb4d599fe87747e038ee3 +size 10447450 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3ca2ad5ec175a734c10b11bf652de23583f3972e --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f837e133b266011eeef88d4f92ef959509688004e8cf210d714689fef9c0c3f +size 4112418 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cbd797146070555729e6742b4d1bdc9114b45a66 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a284c1f010746cdd12afaae69a73d8275c825aa93eb6f60c6975613621732cd +size 36334 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5975f1fdd3b929cd18be988a160f0d40f63af2e3 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:467804c0624fe288651ce2f261419e0e60469099655629ff4302e37b4585cde3 +size 4812987 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..88a4f87d3936bb5b92b8f7a0ea53b60ca8a73262 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b6c663057800fa768c846b148cf53acfc729a4b36bc8c68b7f4b316980a9f25 +size 676833 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e6e2a7224f196eb42f43bf0c78cc803c75795357 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_7/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11005a7a8d2594a208a59de5d08f528298812bc6e5278e40e60c953412d072c9 +size 1434225 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0c3bb9304d25525482f7fb605f3e53fc4ed4f2e9 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cac48277c13c833c695786e5a47ee41bbb7dfacc7f9a7cce8405e373b87608e +size 4597710 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6687c476e5fb5097034a9e556b9c1cb8edc523d1 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0ed2847102d24862ca2248f392dd34e35a6830eb323f465e63fdb03d16873e5 +size 10447183 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c76d05ebb63a0e056b3845b32273b56f04ad4b67 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7c9a431fb125a567d8c497558ddfbdf2e5382f638f0df99f897ab7e81e6af73 +size 4107310 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b486175a280fec3b0e3998aedd804e32d45cc488 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:970c7bf6460e5ac569901c767c6bacf69366d8c6159b3e913ad240b3fc5edfa3 +size 35026 diff --git 
a/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b8782257752b5b647d8d7ea461f5d7904064dcb7 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c035c5054054fd55d6a4073b50b74f96479e605dcabf1750b73b07b8b0d93c +size 4819207 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..29eba4da901a44581210687322eac7f12cc92788 --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d252b38149e2864d1038a0b4c3386c8469ae6a48ea2f11ff097632ae172fde3 +size 674915 diff --git a/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4bd17eb7a00adb4b2045c535f47f32179d2f9a1b --- /dev/null +++ b/merge_qwen/outputs/._merged_qwen_qwen_ties_9/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60c21281e05054bf37fb2dfe9f3659b88bce789f006852cecaafffea1ce67ae6 +size 1439750 diff --git a/merge_llama/results/._merged2_llama_dare_linear_9/results_2025-06-23T10-15-33.465228.json b/merge_qwen/results/._merged_qwen_qwen_dare_linear_1/results_2025-07-09T04-07-37.799889.json similarity index 78% rename from merge_llama/results/._merged2_llama_dare_linear_9/results_2025-06-23T10-15-33.465228.json rename to merge_qwen/results/._merged_qwen_qwen_dare_linear_1/results_2025-07-09T04-07-37.799889.json index 398edc1f1d44a3bfd7106eb58e47a2f3b335f335..68e1d217c8179471249f03fd41ac2888e4edda36 100644 --- a/merge_llama/results/._merged2_llama_dare_linear_9/results_2025-06-23T10-15-33.465228.json +++ b/merge_qwen/results/._merged_qwen_qwen_dare_linear_1/results_2025-07-09T04-07-37.799889.json @@ -1,17 +1,5 @@ { "results": { - "mm|gsm8k|0": { - "math_pass@1:1_samples": 0.0, - "math_pass@1:1_samples_stderr": 0.0 - }, - "mm|arc_easy|0": { - "sem": 0.0, - "sem_stderr": 0.0 - }, - "mm|math_500|0": { - "math_pass@1:1_samples": 0.0, - "math_pass@1:1_samples_stderr": 0.0 - }, "mm|arc_challenge|0": { "sem": 0.0, "sem_stderr": 0.0 @@ -20,18 +8,30 @@ "sem": 0.0, "sem_stderr": 0.0 }, - "mm|gpqa_diamond|0": { - "sem": 0.0 + "mm|arc_easy|0": { + "sem": 0.0, + "sem_stderr": 0.0 }, "mm|commonsenseqa|0": { "sem": 0.0, "sem_stderr": 0.0 }, - "all": { + "mm|math_500|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { "math_pass@1:1_samples": 0.0, - "math_pass@1:1_samples_stderr": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond|0": { + 
"sem": 0.0 + }, + "all": { "sem": 0.0, - "sem_stderr": 0.0 + "sem_stderr": 0.0, + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 } }, "versions": { @@ -46,12 +46,12 @@ "mm|truthfulqa|0": 0 }, "size": { - "mm|gsm8k|0": 569, - "mm|arc_easy|0": 835, - "mm|math_500|0": 47, - "mm|arc_challenge|0": 321, - "mm|truthfulqa|0": 84, - "mm|gpqa_diamond|0": 1, - "mm|commonsenseqa|0": 283 + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 } } \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_dare_linear_3/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_dare_linear_3/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..68e1d217c8179471249f03fd41ac2888e4edda36 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_dare_linear_3/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|truthfulqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|arc_easy|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.0, + "sem_stderr": 0.0, + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_dare_linear_5/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_dare_linear_5/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..68e1d217c8179471249f03fd41ac2888e4edda36 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_dare_linear_5/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|truthfulqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|arc_easy|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.0, + "sem_stderr": 0.0, + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + 
"mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_dare_linear_7/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_dare_linear_7/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..68e1d217c8179471249f03fd41ac2888e4edda36 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_dare_linear_7/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|truthfulqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|arc_easy|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.0, + "sem_stderr": 0.0, + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_dare_linear_9/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_dare_linear_9/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..68e1d217c8179471249f03fd41ac2888e4edda36 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_dare_linear_9/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|truthfulqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|arc_easy|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.0, + "sem_stderr": 0.0, + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_linear_1/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_linear_1/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..c634fd1633190d78de2dbf6b707bbfb9571b061e --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_linear_1/results_2025-07-09T04-07-37.799889.json @@ -0,0 
+1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9523809523809523, + "sem_stderr": 0.010674662849100467 + }, + "mm|truthfulqa|0": { + "sem": 0.831858407079646, + "sem_stderr": 0.03533891997128865 + }, + "mm|arc_easy|0": { + "sem": 0.9855222337125129, + "sem_stderr": 0.003843221637867928 + }, + "mm|commonsenseqa|0": { + "sem": 0.9283582089552239, + "sem_stderr": 0.01411132437126286 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9438669438669439, + "math_pass@1:1_samples_stderr": 0.010506162491428087 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7396239604256671, + "sem_stderr": 0.015992032207379974, + "math_pass@1:1_samples": 0.971933471933472, + "math_pass@1:1_samples_stderr": 0.0052530812457140435 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_linear_3/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_linear_3/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..11f10a1609e24726c8c00e5d714426006291e569 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_linear_3/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.974937343358396, + "sem_stderr": 0.007835385506748812 + }, + "mm|truthfulqa|0": { + "sem": 0.8761061946902655, + "sem_stderr": 0.031131078051925432 + }, + "mm|arc_easy|0": { + "sem": 0.9958634953464323, + "sem_stderr": 0.0020650382575120585 + }, + "mm|commonsenseqa|0": { + "sem": 0.9283582089552239, + "sem_stderr": 0.014111324371262857 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9584199584199584, + "math_pass@1:1_samples_stderr": 0.00911170740977455 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7550530484700635, + "sem_stderr": 0.01378570654686229, + "math_pass@1:1_samples": 0.9792099792099792, + "math_pass@1:1_samples_stderr": 0.004555853704887275 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_linear_5/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_linear_5/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..863f777b1f111fde4456fda6e305bd7eafb4e8d8 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_linear_5/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9799498746867168, + "sem_stderr": 
0.007026174643511088 + }, + "mm|truthfulqa|0": { + "sem": 0.9026548672566371, + "sem_stderr": 0.02800973330101709 + }, + "mm|arc_easy|0": { + "sem": 0.9948293691830403, + "sem_stderr": 0.0023075839035582224 + }, + "mm|commonsenseqa|0": { + "sem": 0.9134328358208955, + "sem_stderr": 0.015386564855067493 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.9821428571428571, + "math_pass@1:1_samples_stderr": 0.017857142857142863 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9667359667359667, + "math_pass@1:1_samples_stderr": 0.008185039370074629 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.758173389389458, + "sem_stderr": 0.013182514175788475, + "math_pass@1:1_samples": 0.974439411939412, + "math_pass@1:1_samples_stderr": 0.013021091113608746 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_linear_7/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_linear_7/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..3c12c00d584c0fc054aba04c4486ae948538a4f7 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_linear_7/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9799498746867168, + "sem_stderr": 0.007026174643511089 + }, + "mm|truthfulqa|0": { + "sem": 0.911504424778761, + "sem_stderr": 0.026836826441660896 + }, + "mm|arc_easy|0": { + "sem": 0.9968976215098242, + "sem_stderr": 0.0017893038937826949 + }, + "mm|commonsenseqa|0": { + "sem": 0.9253731343283582, + "sem_stderr": 0.0143791366442404 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9646569646569647, + "math_pass@1:1_samples_stderr": 0.00842786860112732 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.762745011060732, + "sem_stderr": 0.012507860405798769, + "math_pass@1:1_samples": 0.9823284823284824, + "math_pass@1:1_samples_stderr": 0.00421393430056366 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_linear_9/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_linear_9/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..63f3cb0ee2469a101bf857299064042f7a76c0e5 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_linear_9/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.974937343358396, + "sem_stderr": 0.0078353855067488 + }, + "mm|truthfulqa|0": { + "sem": 0.9203539823008849, 
+ "sem_stderr": 0.025582941017507035 + }, + "mm|arc_easy|0": { + "sem": 0.9958634953464323, + "sem_stderr": 0.0020650382575120472 + }, + "mm|commonsenseqa|0": { + "sem": 0.9253731343283582, + "sem_stderr": 0.014379136644240405 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9501039501039501, + "math_pass@1:1_samples_stderr": 0.009937977881677435 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7633055910668143, + "sem_stderr": 0.01246562535650207, + "math_pass@1:1_samples": 0.975051975051975, + "math_pass@1:1_samples_stderr": 0.004968988940838717 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_ties_1/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_ties_1/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..9e896b6040d1820b682ca7e14ac644b8255d2722 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_ties_1/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9724310776942355, + "sem_stderr": 0.00820725209839997 + }, + "mm|truthfulqa|0": { + "sem": 0.8672566371681416, + "sem_stderr": 0.03206056802254301 + }, + "mm|arc_easy|0": { + "sem": 0.9937952430196484, + "sem_stderr": 0.002526517331273208 + }, + "mm|commonsenseqa|0": { + "sem": 0.9014925373134328, + "sem_stderr": 0.016305810881641264 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9480249480249481, + "math_pass@1:1_samples_stderr": 0.010131802853231764 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7469950990390917, + "sem_stderr": 0.014775037083464362, + "math_pass@1:1_samples": 0.974012474012474, + "math_pass@1:1_samples_stderr": 0.005065901426615882 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_ties_3/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_ties_3/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..25258e3f38d6a9033a003607b2cb9e9bf2ca6a66 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_ties_3/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9774436090225563, + "sem_stderr": 0.007442847600941792 + }, + "mm|truthfulqa|0": { + "sem": 0.8849557522123894, + "sem_stderr": 0.03014978512695784 + }, + "mm|arc_easy|0": { + "sem": 0.9906928645294726, + "sem_stderr": 
0.0030895054948574235 + }, + "mm|commonsenseqa|0": { + "sem": 0.9283582089552239, + "sem_stderr": 0.014111324371262855 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9688149688149689, + "math_pass@1:1_samples_stderr": 0.007933647360953108 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7562900869439285, + "sem_stderr": 0.013698365648504978, + "math_pass@1:1_samples": 0.9844074844074844, + "math_pass@1:1_samples_stderr": 0.003966823680476554 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_ties_5/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_ties_5/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..11d24dce1194578711e9e3b77ad5caa7c8b2f816 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_ties_5/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9674185463659147, + "sem_stderr": 0.008899198557045847 + }, + "mm|truthfulqa|0": { + "sem": 0.9203539823008849, + "sem_stderr": 0.025582941017507042 + }, + "mm|arc_easy|0": { + "sem": 0.9886246122026887, + "sem_stderr": 0.0034120096550116145 + }, + "mm|commonsenseqa|0": { + "sem": 0.9253731343283582, + "sem_stderr": 0.014379136644240405 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9584199584199584, + "math_pass@1:1_samples_stderr": 0.009111707409774537 + }, + "mm|gpqa_diamond|0": { + "sem": 1.0 + }, + "all": { + "sem": 0.9603540550395693, + "sem_stderr": 0.013068321468451227, + "math_pass@1:1_samples": 0.9792099792099792, + "math_pass@1:1_samples_stderr": 0.004555853704887269 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_ties_7/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_ties_7/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..c28a159d71f660a52578b6608d6fb7f153d24ad9 --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_ties_7/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9724310776942355, + "sem_stderr": 0.00820725209839995 + }, + "mm|truthfulqa|0": { + "sem": 0.911504424778761, + "sem_stderr": 0.02683682644166091 + }, + "mm|arc_easy|0": { + "sem": 0.9906928645294726, + "sem_stderr": 0.0030895054948574213 + }, + "mm|commonsenseqa|0": { + "sem": 0.9104477611940298, + "sem_stderr": 
0.015624009940447076 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9646569646569647, + "math_pass@1:1_samples_stderr": 0.008427868601127304 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7570152256392998, + "sem_stderr": 0.01343939849384134, + "math_pass@1:1_samples": 0.9823284823284824, + "math_pass@1:1_samples_stderr": 0.004213934300563652 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._merged_qwen_qwen_ties_9/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._merged_qwen_qwen_ties_9/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..524b7cf0c941a55df3cf14a6d1cefd20a79ed8bb --- /dev/null +++ b/merge_qwen/results/._merged_qwen_qwen_ties_9/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9724310776942355, + "sem_stderr": 0.008207252098399977 + }, + "mm|truthfulqa|0": { + "sem": 0.8849557522123894, + "sem_stderr": 0.03014978512695783 + }, + "mm|arc_easy|0": { + "sem": 0.9937952430196484, + "sem_stderr": 0.002526517331273207 + }, + "mm|commonsenseqa|0": { + "sem": 0.9194029850746268, + "sem_stderr": 0.014894955151109748 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9604989604989606, + "math_pass@1:1_samples_stderr": 0.0088906209217187 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7541170116001801, + "sem_stderr": 0.013944627426935193, + "math_pass@1:1_samples": 0.9802494802494803, + "math_pass@1:1_samples_stderr": 0.00444531046085935 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/merge_qwen/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json new file mode 100644 index 0000000000000000000000000000000000000000..8961949e7ebe623b9fcfb6add1be3c940740d2cc --- /dev/null +++ b/merge_qwen/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json @@ -0,0 +1,81 @@ +{ + "results": { + "mm|arc_easy_c|0": { + "em": 0.9958634953464323, + "em_stderr": 0.002065038257512051, + "qem": 0.9958634953464323, + "qem_stderr": 0.002065038257512051, + "pem": 0.9958634953464323, + "pem_stderr": 0.002065038257512051, + "pqem": 0.9968976215098242, + "pqem_stderr": 0.0017893038937827073 + }, + "mm|gpqa_diamond_c|0": { + "em": 0.0, + "qem": 0.0, + "pem": 0.0, + "pqem": 0.0 + }, + "mm|truthfulqa_c|0": { + "em": 0.9203539823008849, + "em_stderr": 0.025582941017507032, + "qem": 
0.9203539823008849, + "qem_stderr": 0.025582941017507032, + "pem": 0.9203539823008849, + "pem_stderr": 0.025582941017507032, + "pqem": 0.9203539823008849, + "pqem_stderr": 0.025582941017507032 + }, + "mm|arc_challenge_c|0": { + "em": 0.9674185463659147, + "em_stderr": 0.008899198557045856, + "qem": 0.9674185463659147, + "qem_stderr": 0.008899198557045856, + "pem": 0.9674185463659147, + "pem_stderr": 0.008899198557045856, + "pqem": 0.974937343358396, + "pqem_stderr": 0.007835385506748825 + }, + "mm|math_500_c|0": { + "math_pass@1:1_samples": 0.875, + "math_pass@1:1_samples_stderr": 0.04459412925079224 + }, + "mm|commonsenseqa_c|0": { + "em": 0.9343283582089552, + "em_stderr": 0.013553937189608768, + "qem": 0.9343283582089552, + "qem_stderr": 0.013553937189608768, + "pem": 0.9343283582089552, + "pem_stderr": 0.013553937189608768, + "pqem": 0.9522388059701492, + "pqem_stderr": 0.011669098140869312 + }, + "mm|gsm8k_c|0": { + "math_pass@1:1_samples": 0.9002079002079002, + "math_pass@1:1_samples_stderr": 0.013680402391985067 + }, + "all": { + "em": 0.7635928764444374, + "em_stderr": 0.012525278755418427, + "qem": 0.7635928764444374, + "qem_stderr": 0.012525278755418427, + "pem": 0.7635928764444374, + "pem_stderr": 0.012525278755418427, + "pqem": 0.7688855506278509, + "pqem_stderr": 0.01171918213972697, + "math_pass@1:1_samples": 0.8876039501039501, + "math_pass@1:1_samples_stderr": 0.029137265821388654 + } + }, + "versions": { + "mm|aime24_c|0": 3, + "mm|arc_challenge_c|0": 0, + "mm|arc_easy_c|0": 0, + "mm|commonsenseqa_c|0": 0, + "mm|gpqa_diamond_c|0": 1, + "mm|gsm8k_c|0": 0, + "mm|math_500_c|0": 3, + "mm|mmlu_pro_c|0": 0, + "mm|truthfulqa_c|0": 0 + } +} \ No newline at end of file diff --git a/merge_qwen/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json b/merge_qwen/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..3fd9bfe2e58a490f76f026cf5c5e91ff25cfa1a1 --- /dev/null +++ b/merge_qwen/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.9824561403508771, + "sem_stderr": 0.0065807837808415995 + }, + "mm|truthfulqa|0": { + "sem": 0.9203539823008849, + "sem_stderr": 0.025582941017507042 + }, + "mm|arc_easy|0": { + "sem": 1.0, + "sem_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.9432835820895522, + "sem_stderr": 0.0126561700474127 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9667359667359667, + "math_pass@1:1_samples_stderr": 0.00818503937007462 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "all": { + "sem": 0.7692187409482629, + "sem_stderr": 0.011204973711440335, + "math_pass@1:1_samples": 0.9833679833679834, + "math_pass@1:1_samples_stderr": 0.00409251968503731 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 399, + "mm|truthfulqa|0": 113, + "mm|arc_easy|0": 967, + "mm|commonsenseqa|0": 335, + "mm|math_500|0": 56, + "mm|gsm8k|0": 481, + "mm|gpqa_diamond|0": 1 + } +} \ No newline at end of file diff --git a/test/0-1k/logs/Qwen2.5-14B.log b/test/0-1k/logs/Qwen2.5-14B.log new file mode 100644 index 
0000000000000000000000000000000000000000..8914169fda724c9a027969db3b66bc013e55eb36
--- /dev/null
+++ b/test/0-1k/logs/Qwen2.5-14B.log
@@ -0,0 +1,32 @@
+INFO 07-09 16:25:04 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:25:04 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|--------------------|------:|---------------------|-----:|---|-----:|
+|all | |em |0.7636|± |0.0125|
+| | |qem |0.7636|± |0.0125|
+| | |pem |0.7636|± |0.0125|
+| | |pqem |0.7689|± |0.0117|
+| | |math_pass@1:1_samples|0.8876|± |0.0291|
+|mm\|arc_challenge_c\|0| 0|em |0.9674|± |0.0089|
+| | |qem |0.9674|± |0.0089|
+| | |pem |0.9674|± |0.0089|
+| | |pqem |0.9749|± |0.0078|
+|mm\|arc_easy_c\|0 | 0|em |0.9959|± |0.0021|
+| | |qem |0.9959|± |0.0021|
+| | |pem |0.9959|± |0.0021|
+| | |pqem |0.9969|± |0.0018|
+|mm\|commonsenseqa_c\|0| 0|em |0.9343|± |0.0136|
+| | |qem |0.9343|± |0.0136|
+| | |pem |0.9343|± |0.0136|
+| | |pqem |0.9522|± |0.0117|
+|mm\|gpqa_diamond_c\|0 | 1|em |0.0000| | |
+| | |qem |0.0000| | |
+| | |pem |0.0000| | |
+| | |pqem |0.0000| | |
+|mm\|gsm8k_c\|0 | 0|math_pass@1:1_samples|0.9002|± |0.0137|
+|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|0.8750|± |0.0446|
+|mm\|truthfulqa_c\|0 | 0|em |0.9204|± |0.0256|
+| | |qem |0.9204|± |0.0256|
+| | |pem |0.9204|± |0.0256|
+| | |pqem |0.9204|± |0.0256|
+
diff --git a/test/0-1k/logs/R1-Qwen2.5-14B.log b/test/0-1k/logs/R1-Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..77ffdd024e8b826ef73311a5f8bba235c13d475a
--- /dev/null
+++ b/test/0-1k/logs/R1-Qwen2.5-14B.log
@@ -0,0 +1,14 @@
+INFO 07-09 16:23:25 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:23:25 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.7692|± |0.0112|
+| | |math_pass@1:1_samples|0.9834|± |0.0041|
+|mm\|arc_challenge\|0| 0|sem |0.9825|± |0.0066|
+|mm\|arc_easy\|0 | 0|sem |1.0000|± |0.0000|
+|mm\|commonsenseqa\|0| 0|sem |0.9433|± |0.0127|
+|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9667|± |0.0082|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|truthfulqa\|0 | 0|sem |0.9204|± |0.0256|
+
diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b6156eb622d8933bdc4f0cfb5a8f51ee99328e4e
--- /dev/null
+++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73eea6bb87dabfff096940a35ab69aff9ff7e961a00ad4d7fa67d459a4b0c21e
+size 9868
diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..43cfe1af403ccdf8526d5a204b8d301026f15980
--- /dev/null
+++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:311b27447d9c787522dd5c95904ac166c9e9e98484f2e8fbd27fc09658ee76a2
+size 144505
diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7f3d16e9330269d9866ea8d6b0b2f28985c958f3
--- /dev/null
+++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df98f1b935346ff25bf968ef624fcde32a668516ed2a4b015fce3060b2b848b0
+size 298935
diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..636476a786e502aad284c755429a11be99806b7a
--- /dev/null
+++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2cd3ea30aad411248ec870ce34e8698a05a4484fde774179c22466bd901a77f
+size 89083
diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index
0000000000000000000000000000000000000000..b6d3dfecd45345ffe200d1469ce1994aa55e8750 --- /dev/null +++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e158f42b9d15a64c6c7d7c2b6a67cf8440ce01660587148ccfa84f746c62ecaa +size 18382 diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6d3da2c54e6ff1e0086fe712826773f7eaf9f846 --- /dev/null +++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e221610fc9b65da8b4790e86e040e927974d16bdb07b80a9c930c84b7dce893 +size 1091882 diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3116a44d914eb8342f737350c7d044a2b12b9855 --- /dev/null +++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b98ec65862458e53b0282d4b1c075064f6f5247f507d6438fbf03ebeb0c708a1 +size 160941 diff --git a/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..65262ec5e9037c6d15a1a9e0d5d9f3cbce2b33b2 --- /dev/null +++ b/test/0-1k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33d75b6ee02bf449f0757c1147bcf73f08a1d87f74d9bf733813ac4c80d38672 +size 49689 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b6156eb622d8933bdc4f0cfb5a8f51ee99328e4e --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73eea6bb87dabfff096940a35ab69aff9ff7e961a00ad4d7fa67d459a4b0c21e +size 9868 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7a5a2c965bef6078254eb1490796a490c712ef80 --- /dev/null +++ 
b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c8120d924b2279903459f62ebfe944e3ca09f108c49e12d8b12ade52b18c784 +size 4582345 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..94ea412001277869b6def509e719532af51ba80a --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02264be741d73ce4e69e440cd2d9ecea4d44b4c2b236a60b0f726fdceff4060e +size 10414901 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f83bdd499b69a0d2dd6fa74baf1188b838c660a5 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:458c5249c1db7bc4c93369e723fe1635ddc6ef813306b3f071993b4672ba2365 +size 4078706 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b199178e4f894cf18727d73abdb2ff2b60a9c7f0 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03dee55adf296480039099bd88789ff23e8a6ce8a0f5728ea8aaee6c2e625fe9 +size 51717 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f24dd130674f01d1afd7372d8522f8f8a6b35259 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5025e4428631845187d0524b571924e4e4fa6ed214eca2131e495b452056bb31 +size 4800610 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6c96a7553c04669dd003d18b1291e70124ce6c8b --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9d0159b36ad4e35ea63ae5181f1abe7f92d599bf61b6021fbab1363711c0082b +size 671100 diff --git a/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c2c852976833feb7b9ecb82870d3da4d88c67959 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0474394184387849716381b03604a13c9aaeb818d25806ca11a40bba1240100 +size 1423921 diff --git a/test/0-1k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/test/0-1k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json new file mode 100644 index 0000000000000000000000000000000000000000..8961949e7ebe623b9fcfb6add1be3c940740d2cc --- /dev/null +++ b/test/0-1k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json @@ -0,0 +1,81 @@ +{ + "results": { + "mm|arc_easy_c|0": { + "em": 0.9958634953464323, + "em_stderr": 0.002065038257512051, + "qem": 0.9958634953464323, + "qem_stderr": 0.002065038257512051, + "pem": 0.9958634953464323, + "pem_stderr": 0.002065038257512051, + "pqem": 0.9968976215098242, + "pqem_stderr": 0.0017893038937827073 + }, + "mm|gpqa_diamond_c|0": { + "em": 0.0, + "qem": 0.0, + "pem": 0.0, + "pqem": 0.0 + }, + "mm|truthfulqa_c|0": { + "em": 0.9203539823008849, + "em_stderr": 0.025582941017507032, + "qem": 0.9203539823008849, + "qem_stderr": 0.025582941017507032, + "pem": 0.9203539823008849, + "pem_stderr": 0.025582941017507032, + "pqem": 0.9203539823008849, + "pqem_stderr": 0.025582941017507032 + }, + "mm|arc_challenge_c|0": { + "em": 0.9674185463659147, + "em_stderr": 0.008899198557045856, + "qem": 0.9674185463659147, + "qem_stderr": 0.008899198557045856, + "pem": 0.9674185463659147, + "pem_stderr": 0.008899198557045856, + "pqem": 0.974937343358396, + "pqem_stderr": 0.007835385506748825 + }, + "mm|math_500_c|0": { + "math_pass@1:1_samples": 0.875, + "math_pass@1:1_samples_stderr": 0.04459412925079224 + }, + "mm|commonsenseqa_c|0": { + "em": 0.9343283582089552, + "em_stderr": 0.013553937189608768, + "qem": 0.9343283582089552, + "qem_stderr": 0.013553937189608768, + "pem": 0.9343283582089552, + "pem_stderr": 0.013553937189608768, + "pqem": 0.9522388059701492, + "pqem_stderr": 0.011669098140869312 + }, + "mm|gsm8k_c|0": { + "math_pass@1:1_samples": 0.9002079002079002, + "math_pass@1:1_samples_stderr": 0.013680402391985067 + }, + "all": { + "em": 0.7635928764444374, + "em_stderr": 0.012525278755418427, + "qem": 0.7635928764444374, + "qem_stderr": 0.012525278755418427, + "pem": 0.7635928764444374, + "pem_stderr": 0.012525278755418427, + "pqem": 0.7688855506278509, + "pqem_stderr": 0.01171918213972697, + "math_pass@1:1_samples": 0.8876039501039501, + "math_pass@1:1_samples_stderr": 0.029137265821388654 + } + }, + "versions": { + "mm|aime24_c|0": 3, + "mm|arc_challenge_c|0": 0, + "mm|arc_easy_c|0": 0, + "mm|commonsenseqa_c|0": 0, + "mm|gpqa_diamond_c|0": 1, + "mm|gsm8k_c|0": 0, + "mm|math_500_c|0": 3, + "mm|mmlu_pro_c|0": 0, + "mm|truthfulqa_c|0": 0 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json 
b/test/0-1k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json
new file mode 100644
index 0000000000000000000000000000000000000000..3fd9bfe2e58a490f76f026cf5c5e91ff25cfa1a1
--- /dev/null
+++ b/test/0-1k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json
@@ -0,0 +1,57 @@
+{
+ "results": {
+ "mm|arc_challenge|0": {
+ "sem": 0.9824561403508771,
+ "sem_stderr": 0.0065807837808415995
+ },
+ "mm|truthfulqa|0": {
+ "sem": 0.9203539823008849,
+ "sem_stderr": 0.025582941017507042
+ },
+ "mm|arc_easy|0": {
+ "sem": 1.0,
+ "sem_stderr": 0.0
+ },
+ "mm|commonsenseqa|0": {
+ "sem": 0.9432835820895522,
+ "sem_stderr": 0.0126561700474127
+ },
+ "mm|math_500|0": {
+ "math_pass@1:1_samples": 1.0,
+ "math_pass@1:1_samples_stderr": 0.0
+ },
+ "mm|gsm8k|0": {
+ "math_pass@1:1_samples": 0.9667359667359667,
+ "math_pass@1:1_samples_stderr": 0.00818503937007462
+ },
+ "mm|gpqa_diamond|0": {
+ "sem": 0.0
+ },
+ "all": {
+ "sem": 0.7692187409482629,
+ "sem_stderr": 0.011204973711440335,
+ "math_pass@1:1_samples": 0.9833679833679834,
+ "math_pass@1:1_samples_stderr": 0.00409251968503731
+ }
+ },
+ "versions": {
+ "mm|aime24|0": 3,
+ "mm|arc_challenge|0": 0,
+ "mm|arc_easy|0": 0,
+ "mm|commonsenseqa|0": 0,
+ "mm|gpqa_diamond|0": 2,
+ "mm|gsm8k|0": 0,
+ "mm|math_500|0": 3,
+ "mm|mmlu_pro|0": 0,
+ "mm|truthfulqa|0": 0
+ },
+ "size": {
+ "mm|arc_challenge|0": 399,
+ "mm|truthfulqa|0": 113,
+ "mm|arc_easy|0": 967,
+ "mm|commonsenseqa|0": 335,
+ "mm|math_500|0": 56,
+ "mm|gsm8k|0": 481,
+ "mm|gpqa_diamond|0": 1
+ }
+}
\ No newline at end of file
diff --git a/test/1-2k/logs/Qwen2.5-14B.log b/test/1-2k/logs/Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..db51a1c3f3c707c49c0cf576367c323cf203dae0
--- /dev/null
+++ b/test/1-2k/logs/Qwen2.5-14B.log
@@ -0,0 +1,32 @@
+INFO 07-09 16:25:28 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:25:28 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|--------------------|------:|---------------------|-----:|---|-----:|
+|all | |em |0.7842|± |0.0636|
+| | |qem |0.7842|± |0.0636|
+| | |pem |0.7842|± |0.0636|
+| | |pqem |0.8343|± |0.0586|
+| | |math_pass@1:1_samples|0.6553|± |0.0439|
+|mm\|arc_challenge_c\|0| 0|em |0.9037|± |0.0255|
+| | |qem |0.9037|± |0.0255|
+| | |pem |0.9037|± |0.0255|
+| | |pqem |0.9111|± |0.0246|
+|mm\|arc_easy_c\|0 | 0|em |0.9172|± |0.0213|
+| | |qem |0.9172|± |0.0213|
+| | |pem |0.9172|± |0.0213|
+| | |pqem |0.9349|± |0.0190|
+|mm\|commonsenseqa_c\|0| 0|em |0.8035|± |0.0303|
+| | |qem |0.8035|± |0.0303|
+| | |pem |0.8035|± |0.0303|
+| | |pqem |0.8324|± |0.0285|
+|mm\|gpqa_diamond_c\|0 | 1|em |0.5714|± |0.2020|
+| | |qem |0.5714|± |0.2020|
+| | |pem |0.5714|± |0.2020|
+| | |pqem |0.7143|± |0.1844|
+|mm\|gsm8k_c\|0 | 0|math_pass@1:1_samples|0.7211|± |0.0371|
+|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|0.5895|± |0.0507|
+|mm\|truthfulqa_c\|0 | 0|em |0.7252|± |0.0392|
+| | |qem |0.7252|± |0.0392|
+| | |pem |0.7252|± |0.0392|
+| | |pqem |0.7786|± |0.0364|
+
diff --git a/test/1-2k/logs/R1-Qwen2.5-14B.log b/test/1-2k/logs/R1-Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..999c57ef2af2de6e4dbb447171b7d21376d0b679
--- /dev/null
+++ b/test/1-2k/logs/R1-Qwen2.5-14B.log
@@ -0,0 +1,14 @@
+INFO 07-09 16:23:25 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:23:25 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.8418|± |0.0574|
+| | |math_pass@1:1_samples|0.9694|± |0.0099|
+|mm\|arc_challenge\|0| 0|sem |0.9333|± |0.0215|
+|mm\|arc_easy\|0 | 0|sem |0.9527|± |0.0164|
+|mm\|commonsenseqa\|0| 0|sem |0.7919|± |0.0310|
+|mm\|gpqa_diamond\|0 | 2|sem |0.7143|± |0.1844|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9388|± |0.0198|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|truthfulqa\|0 | 0|sem |0.8168|± |0.0339|
+
diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b6156eb622d8933bdc4f0cfb5a8f51ee99328e4e
--- /dev/null
+++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73eea6bb87dabfff096940a35ab69aff9ff7e961a00ad4d7fa67d459a4b0c21e
+size 9868
diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..197f0ffdceb5afe7811d7530d3a6d57c592300be
--- /dev/null
+++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5e0581941155369cf86a88bde8c7584df727695568a12e224908622a58b33e4
+size 70547
diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ba491c029fc8dc2c0c8757c103962db2ba149930
--- /dev/null
+++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3e3eb66b51305f76b5b93cca5f1d472680c77b5559aad917e3a459b4a85a532
+size 77262
diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6b1dd7b6e1afa445c886bacad3a058df1310e65c
--- /dev/null
+++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9612030c6c65e8dc2e057d118e270644e63798fce1bcb0d039900d4193a3403
+size 55501
diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index
0000000000000000000000000000000000000000..03bd25bfb642fa57293a384e0178311482f1a550 --- /dev/null +++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5eee5a9737d6bffe96dcee791676c1cebb916aeddb15a0f692794fdbb4a8a423 +size 29245 diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..72aaf9bb3507a80ce93199167a9f95c07f2cb2c9 --- /dev/null +++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b97a047a4c6df0d9ee8721865f0993e540f6677912769794cd76752b34a4a5dc +size 469868 diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2619758892a09541151f33e05edba25c771e9877 --- /dev/null +++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:264a78c6ae085aef916e490d5409f76b1077a2ea239e22be7a107eb6ceeb7fcd +size 850385 diff --git a/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..12f4eb1be2fa984546e4c874045dbecaf1668300 --- /dev/null +++ b/test/1-2k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac17689575b0cd6e93110b92a18056bb07b87c74ca19c8479333b79e4ca634c0 +size 62451 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b6156eb622d8933bdc4f0cfb5a8f51ee99328e4e --- /dev/null +++ b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73eea6bb87dabfff096940a35ab69aff9ff7e961a00ad4d7fa67d459a4b0c21e +size 9868 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b5a8fc5bff6751ad56ed4c213af3c72aa26530b5 --- /dev/null +++ 
b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ea6a5475c5531eb0aecad5fb953bbd5246304983b0b91200f27a697b3c0f230 +size 2701240 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54289923517e3a561de47d1ad0baaedfce03809e --- /dev/null +++ b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a183134131277fcfdf8625f660e1850e6bff8f7c394c3e9eeb1c01e0369b8669 +size 3287609 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2382ec92efc76d97fddbe8c9c0e66f49674388d5 --- /dev/null +++ b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d0b580ebf334950fbc3b5ef9426102781817ba321016de9f92921b05144624f +size 3885869 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..46992ee1e8d7ebd8eb91663f6c40f963dc3f8142 --- /dev/null +++ b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37f9a62977bded5f69db866c2513d2c1a90afa218f84201d8abe17ab251a2cd6 +size 194377 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d67914314f98cf10ef39a214af83b1c023a0a2a7 --- /dev/null +++ b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33a70c359654a9bf4da28f5b7bd66f463e7829fc49d4eeed6af0302548cc9218 +size 2648218 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e7189644669dca6eb99eca1f3b3c840d04dec6d --- /dev/null +++ b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:db3a3b614689d6dfd27eaa823aa00ef7b10f27b5f96d5bb3d7996830f5b5d431 +size 1933807 diff --git a/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..555682a65db64e1943be39bc7c9a3b2a9d37c087 --- /dev/null +++ b/test/1-2k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19b9bea4281c7c25556273bdf6bcdaa736ec8a4fcf116427e46543c67c7ef498 +size 2799436 diff --git a/test/1-2k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/test/1-2k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json new file mode 100644 index 0000000000000000000000000000000000000000..b8a32e1a46611d253ee4f6c4085e4bffeb56a4a3 --- /dev/null +++ b/test/1-2k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json @@ -0,0 +1,85 @@ +{ + "results": { + "mm|arc_easy_c|0": { + "em": 0.9171597633136095, + "em_stderr": 0.021266132793735532, + "qem": 0.9171597633136095, + "qem_stderr": 0.021266132793735532, + "pem": 0.9171597633136095, + "pem_stderr": 0.021266132793735532, + "pqem": 0.9349112426035503, + "pqem_stderr": 0.01903196106569593 + }, + "mm|gpqa_diamond_c|0": { + "em": 0.5714285714285714, + "em_stderr": 0.20203050891044214, + "qem": 0.5714285714285714, + "qem_stderr": 0.20203050891044214, + "pem": 0.5714285714285714, + "pem_stderr": 0.20203050891044214, + "pqem": 0.7142857142857143, + "pqem_stderr": 0.18442777839082938 + }, + "mm|truthfulqa_c|0": { + "em": 0.7251908396946565, + "em_stderr": 0.03915345408847835, + "qem": 0.7251908396946565, + "qem_stderr": 0.03915345408847835, + "pem": 0.7251908396946565, + "pem_stderr": 0.03915345408847835, + "pqem": 0.7786259541984732, + "pqem_stderr": 0.03641297081313729 + }, + "mm|arc_challenge_c|0": { + "em": 0.9037037037037037, + "em_stderr": 0.02548387251435115, + "qem": 0.9037037037037037, + "qem_stderr": 0.02548387251435115, + "pem": 0.9037037037037037, + "pem_stderr": 0.02548387251435115, + "pqem": 0.9111111111111111, + "pqem_stderr": 0.0245842526835243 + }, + "mm|math_500_c|0": { + "math_pass@1:1_samples": 0.5894736842105263, + "math_pass@1:1_samples_stderr": 0.05073863564551208 + }, + "mm|commonsenseqa_c|0": { + "em": 0.8034682080924855, + "em_stderr": 0.030299574664788147, + "qem": 0.8034682080924855, + "qem_stderr": 0.030299574664788147, + "pem": 0.8034682080924855, + "pem_stderr": 0.030299574664788147, + "pqem": 0.8323699421965318, + "pqem_stderr": 0.028481963032143377 + }, + "mm|gsm8k_c|0": { + "math_pass@1:1_samples": 0.7210884353741497, + "math_pass@1:1_samples_stderr": 0.03711513959675177 + }, + "all": { + "em": 0.7841902172466052, + "em_stderr": 0.06364670859435907, + "qem": 0.7841902172466052, + "qem_stderr": 0.06364670859435907, + "pem": 0.7841902172466052, + "pem_stderr": 0.06364670859435907, + "pqem": 0.8342607928790761, + "pqem_stderr": 0.05858778519706607, + "math_pass@1:1_samples": 0.655281059792338, + "math_pass@1:1_samples_stderr": 0.04392688762113192 + } + }, + "versions": { + "mm|aime24_c|0": 3, + "mm|arc_challenge_c|0": 0, + "mm|arc_easy_c|0": 0, + "mm|commonsenseqa_c|0": 0, + "mm|gpqa_diamond_c|0": 1, + "mm|gsm8k_c|0": 0, + "mm|math_500_c|0": 3, + "mm|mmlu_pro_c|0": 
0,
+ "mm|truthfulqa_c|0": 0
+ }
+}
\ No newline at end of file
diff --git a/test/1-2k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json b/test/1-2k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json
new file mode 100644
index 0000000000000000000000000000000000000000..6a87672fe07b8bf57b976d919e9ce44f13e4b69d
--- /dev/null
+++ b/test/1-2k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json
@@ -0,0 +1,58 @@
+{
+ "results": {
+ "mm|arc_challenge|0": {
+ "sem": 0.9333333333333333,
+ "sem_stderr": 0.02154866450518182
+ },
+ "mm|truthfulqa|0": {
+ "sem": 0.816793893129771,
+ "sem_stderr": 0.033927709264947335
+ },
+ "mm|arc_easy|0": {
+ "sem": 0.9526627218934911,
+ "sem_stderr": 0.01638387349687986
+ },
+ "mm|commonsenseqa|0": {
+ "sem": 0.791907514450867,
+ "sem_stderr": 0.030952890217749895
+ },
+ "mm|math_500|0": {
+ "math_pass@1:1_samples": 1.0,
+ "math_pass@1:1_samples_stderr": 0.0
+ },
+ "mm|gsm8k|0": {
+ "math_pass@1:1_samples": 0.9387755102040817,
+ "math_pass@1:1_samples_stderr": 0.019841158902911944
+ },
+ "mm|gpqa_diamond|0": {
+ "sem": 0.7142857142857143,
+ "sem_stderr": 0.18442777839082938
+ },
+ "all": {
+ "sem": 0.8417966354186353,
+ "sem_stderr": 0.05744818317511766,
+ "math_pass@1:1_samples": 0.9693877551020409,
+ "math_pass@1:1_samples_stderr": 0.009920579451455972
+ }
+ },
+ "versions": {
+ "mm|aime24|0": 3,
+ "mm|arc_challenge|0": 0,
+ "mm|arc_easy|0": 0,
+ "mm|commonsenseqa|0": 0,
+ "mm|gpqa_diamond|0": 2,
+ "mm|gsm8k|0": 0,
+ "mm|math_500|0": 3,
+ "mm|mmlu_pro|0": 0,
+ "mm|truthfulqa|0": 0
+ },
+ "size": {
+ "mm|arc_challenge|0": 135,
+ "mm|truthfulqa|0": 131,
+ "mm|arc_easy|0": 169,
+ "mm|commonsenseqa|0": 173,
+ "mm|math_500|0": 95,
+ "mm|gsm8k|0": 147,
+ "mm|gpqa_diamond|0": 7
+ }
+}
\ No newline at end of file
diff --git a/test/16-32k/logs/Qwen2.5-14B.log b/test/16-32k/logs/Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..440298b4ecc297d7d42b5e42cc70b9ea41fc9c27
--- /dev/null
+++ b/test/16-32k/logs/Qwen2.5-14B.log
@@ -0,0 +1,33 @@
+INFO 07-09 16:26:15 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:26:15 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|--------------------|------:|---------------------|-----:|---|-----:|
+|all | |em |0.6210|± |0.0913|
+| | |qem |0.6210|± |0.0913|
+| | |pem |0.6210|± |0.0913|
+| | |pqem |0.7047|± |0.0875|
+| | |math_pass@1:1_samples|0.2525|± |0.0859|
+|mm\|aime24_c\|0 | 3|math_pass@1:1_samples|0.0000|± |0.0000|
+|mm\|arc_challenge_c\|0| 0|em |0.8667|± |0.0909|
+| | |qem |0.8667|± |0.0909|
+| | |pem |0.8667|± |0.0909|
+| | |pqem |0.9333|± |0.0667|
+|mm\|arc_easy_c\|0 | 0|em |0.9091|± |0.0909|
+| | |qem |0.9091|± |0.0909|
+| | |pem |0.9091|± |0.0909|
+| | |pqem |0.9091|± |0.0909|
+|mm\|commonsenseqa_c\|0| 0|em |0.5833|± |0.0833|
+| | |qem |0.5833|± |0.0833|
+| | |pem |0.5833|± |0.0833|
+| | |pqem |0.6111|± |0.0824|
+|mm\|gpqa_diamond_c\|0 | 1|em |0.3548|± |0.0874|
+| | |qem |0.3548|± |0.0874|
+| | |pem |0.3548|± |0.0874|
+| | |pqem |0.5484|± |0.0909|
+|mm\|gsm8k_c\|0 | 0|math_pass@1:1_samples|0.6667|± |0.1667|
+|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|0.0909|± |0.0909|
+|mm\|truthfulqa_c\|0 | 0|em |0.3913|± |0.1041|
+| | |qem |0.3913|± |0.1041|
+| | |pem |0.3913|± |0.1041|
+| | |pqem |0.5217|± |0.1065|
+
diff --git a/test/16-32k/logs/R1-Qwen2.5-14B.log b/test/16-32k/logs/R1-Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..ab7af14156184fc679ce0a717c828e2a97c525cc
--- /dev/null
+++ b/test/16-32k/logs/R1-Qwen2.5-14B.log
@@ -0,0 +1,15 @@
+INFO 07-09 16:23:25 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:23:25 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.0133|± |0.0133|
+| | |math_pass@1:1_samples|0.0970|± |0.0970|
+|mm\|aime24\|0 | 3|math_pass@1:1_samples|0.2000|± |0.2000|
+|mm\|arc_challenge\|0| 0|sem |0.0667|± |0.0667|
+|mm\|arc_easy\|0 | 0|sem |0.0000|± |0.0000|
+|mm\|commonsenseqa\|0| 0|sem |0.0000|± |0.0000|
+|mm\|gpqa_diamond\|0 | 2|sem |0.0000|± |0.0000|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.0000|± |0.0000|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.0909|± |0.0909|
+|mm\|truthfulqa\|0 | 0|sem |0.0000|± |0.0000|
+
diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..9a82e4ef23a65a8566239e1e804c5fe7ecf8f99c
--- /dev/null
+++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e6659f8fd76b1e3d55da5fced3a9b230a2581c715c90f7bf2961a51fa55d876
+size 1965793
diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..81bef0f0ff40604b099df20690ffbce95196d1b9
--- /dev/null
+++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:507f0db9de638dfc17533a29ce6abf2172182724495a42b0cbc60b6259b01ccb
+size 18989 diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8bc9cc753d0300644f1d1b8e287bb19665c85aa9 --- /dev/null +++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8598af9e3a001f9567325b118f7b59f71ba6da8357e109839c954e1180a22c4 +size 20630 diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ce7be6eae66e6317eeb1615655ad81834b5f2c1f --- /dev/null +++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6274add4c9a0d9a7ef4dc48534d5c8fc3ce93b6dcfbb6d968b378ed41545ddf3 +size 23820 diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ac1b96040dbec9289d9efeef328369339ca9c54c --- /dev/null +++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6be81d7a42d6a857e465c3cc40ba1ea170e1d50fe30d258536e62d51409c9c77 +size 35282 diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..97d87f5d19c1563583e7980f1d8bb9190ea1d1ee --- /dev/null +++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cfbf4ef069c7aa19cd790de1bbd38bc5dfb1529509adc1e8c5fa19c2ec956f2 +size 62752 diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..37b0671fb1913b59fac66ad54f60f6ef39811c17 --- /dev/null +++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80909752805e9ce021a89c2b94dac4bdecd999ceee1166a7a3968622a8bf0d6d +size 565562 diff --git a/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet 
b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..491496577171716194bfb72276cade51c5df4605 --- /dev/null +++ b/test/16-32k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8b0c46d4aa7f6c7a57f12567bb3cec2014f4eff23d5dbde729a0c311fb37c48 +size 22707 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1b463e8b2e3e57f7d1fc06b296173116043d2fa3 --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:403a0263f9a8c6cfde39f9f8bd01dfefb588e5fa515a32c2901abff2a03918c3 +size 1493436 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..984801fec1b04a4b4b57d032e908961046c05412 --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1a2ede2ce03ecf240981ef0dc85bfe29d6ef0826bcd6256fcdfff94f8e10b9d +size 6979078 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e0feb0aa1fa3a14a619da24abd66e51c4c44777b --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac4342b943469de5ab146c47d5a08c5f3b081b9a2528ebd56092169969f491a1 +size 5297811 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7d15f3e5a9975f3c34a13f560ab021ee27181482 --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:222bb41e51696004f2eeab8dd6bfeb93846da195815626f6b5e70dabe5399cb7 +size 16839001 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet 
b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c238af35ca36405193d9429807bd58d198444a28 --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a6eb1f4b3afbbf34d4e7864bea78a4a955179eaa1a1e65fc04ca279121cf720 +size 14158781 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7725662fad53252fde624513b9bf4bf8b122e93e --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78d21ed002554d82542cc3e601908f51cdafa3f7d7c09b602357e208c7428b89 +size 3214104 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0229c5a2bf6d8fc3ffede820f98c4e0e1994e7d9 --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04d74f4b2703abbdc70003b29aad2fc7f96f8e5bb8240c6c1481b826fd81554b +size 3667935 diff --git a/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8569095c238fd6816702ef3bcee0ccf7d88103b8 --- /dev/null +++ b/test/16-32k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cafee0032b040f5a27ec1837f6805c3142a1a77d8f237fc09c51bf90a18cb5c6 +size 10900038 diff --git a/test/16-32k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/test/16-32k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json new file mode 100644 index 0000000000000000000000000000000000000000..3c0d1cdbd7db3b06c5a7fa3e65b5e60106bd4ca7 --- /dev/null +++ b/test/16-32k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json @@ -0,0 +1,89 @@ +{ + "results": { + "mm|arc_easy_c|0": { + "em": 0.9090909090909091, + "em_stderr": 0.0909090909090909, + "qem": 0.9090909090909091, + "qem_stderr": 0.0909090909090909, + "pem": 0.9090909090909091, + "pem_stderr": 0.0909090909090909, + "pqem": 0.9090909090909091, + "pqem_stderr": 0.0909090909090909 + }, + "mm|aime24_c|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond_c|0": { + "em": 0.3548387096774194, + "em_stderr": 0.08735525166275225, + "qem": 0.3548387096774194, + "qem_stderr": 0.08735525166275225, + 
"pem": 0.3548387096774194, + "pem_stderr": 0.08735525166275225, + "pqem": 0.5483870967741935, + "pqem_stderr": 0.09085862440549507 + }, + "mm|truthfulqa_c|0": { + "em": 0.391304347826087, + "em_stderr": 0.10405096111532161, + "qem": 0.391304347826087, + "qem_stderr": 0.10405096111532161, + "pem": 0.391304347826087, + "pem_stderr": 0.10405096111532161, + "pqem": 0.5217391304347826, + "pqem_stderr": 0.10649955403405124 + }, + "mm|arc_challenge_c|0": { + "em": 0.8666666666666667, + "em_stderr": 0.09085135251589957, + "qem": 0.8666666666666667, + "qem_stderr": 0.09085135251589957, + "pem": 0.8666666666666667, + "pem_stderr": 0.09085135251589957, + "pqem": 0.9333333333333333, + "pqem_stderr": 0.06666666666666667 + }, + "mm|math_500_c|0": { + "math_pass@1:1_samples": 0.09090909090909091, + "math_pass@1:1_samples_stderr": 0.0909090909090909 + }, + "mm|commonsenseqa_c|0": { + "em": 0.5833333333333334, + "em_stderr": 0.08333333333333331, + "qem": 0.5833333333333334, + "qem_stderr": 0.08333333333333331, + "pem": 0.5833333333333334, + "pem_stderr": 0.08333333333333331, + "pqem": 0.6111111111111112, + "pqem_stderr": 0.08240220541217401 + }, + "mm|gsm8k_c|0": { + "math_pass@1:1_samples": 0.6666666666666666, + "math_pass@1:1_samples_stderr": 0.16666666666666666 + }, + "all": { + "em": 0.6210467933188831, + "em_stderr": 0.09129999790727952, + "qem": 0.6210467933188831, + "qem_stderr": 0.09129999790727952, + "pem": 0.6210467933188831, + "pem_stderr": 0.09129999790727952, + "pqem": 0.704732316148866, + "pqem_stderr": 0.08746722828549558, + "math_pass@1:1_samples": 0.25252525252525254, + "math_pass@1:1_samples_stderr": 0.08585858585858586 + } + }, + "versions": { + "mm|aime24_c|0": 3, + "mm|arc_challenge_c|0": 0, + "mm|arc_easy_c|0": 0, + "mm|commonsenseqa_c|0": 0, + "mm|gpqa_diamond_c|0": 1, + "mm|gsm8k_c|0": 0, + "mm|math_500_c|0": 3, + "mm|mmlu_pro_c|0": 0, + "mm|truthfulqa_c|0": 0 + } +} \ No newline at end of file diff --git a/test/16-32k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json b/test/16-32k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..b998219c52aad8ae39be0726e184cd0bfe03c93c --- /dev/null +++ b/test/16-32k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,63 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.06666666666666667, + "sem_stderr": 0.06666666666666667 + }, + "mm|aime24|0": { + "math_pass@1:1_samples": 0.2, + "math_pass@1:1_samples_stderr": 0.2 + }, + "mm|truthfulqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|arc_easy|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.09090909090909091, + "math_pass@1:1_samples_stderr": 0.09090909090909091 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0, + "sem_stderr": 0.0 + }, + "all": { + "sem": 0.013333333333333332, + "sem_stderr": 0.013333333333333332, + "math_pass@1:1_samples": 0.09696969696969697, + "math_pass@1:1_samples_stderr": 0.09696969696969697 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 15, + "mm|aime24|0": 5, + "mm|truthfulqa|0": 23, + "mm|arc_easy|0": 
11,
+ "mm|commonsenseqa|0": 36,
+ "mm|math_500|0": 11,
+ "mm|gsm8k|0": 9,
+ "mm|gpqa_diamond|0": 31
+ }
+}
\ No newline at end of file
diff --git a/test/2-4k/logs/Qwen2.5-14B.log b/test/2-4k/logs/Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..351e26990deb147abade70c0f67f76e4cf551fb6
--- /dev/null
+++ b/test/2-4k/logs/Qwen2.5-14B.log
@@ -0,0 +1,33 @@
+INFO 07-09 16:26:07 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:26:07 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|--------------------|------:|---------------------|-----:|---|-----:|
+|all | |em |0.6466|± |0.0771|
+| | |qem |0.6466|± |0.0771|
+| | |pem |0.6466|± |0.0771|
+| | |pqem |0.7271|± |0.0740|
+| | |math_pass@1:1_samples|0.2402|± |0.0635|
+|mm\|aime24_c\|0 | 3|math_pass@1:1_samples|0.0000|± |0.0000|
+|mm\|arc_challenge_c\|0| 0|em |0.7000|± |0.0851|
+| | |qem |0.7000|± |0.0851|
+| | |pem |0.7000|± |0.0851|
+| | |pqem |0.8333|± |0.0692|
+|mm\|arc_easy_c\|0 | 0|em |0.9355|± |0.0449|
+| | |qem |0.9355|± |0.0449|
+| | |pem |0.9355|± |0.0449|
+| | |pqem |0.9355|± |0.0449|
+|mm\|commonsenseqa_c\|0| 0|em |0.7636|± |0.0578|
+| | |qem |0.7636|± |0.0578|
+| | |pem |0.7636|± |0.0578|
+| | |pqem |0.7818|± |0.0562|
+|mm\|gpqa_diamond_c\|0 | 1|em |0.4118|± |0.1230|
+| | |qem |0.4118|± |0.1230|
+| | |pem |0.4118|± |0.1230|
+| | |pqem |0.5294|± |0.1248|
+|mm\|gsm8k_c\|0 | 0|math_pass@1:1_samples|0.4375|± |0.1281|
+|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|0.2830|± |0.0625|
+|mm\|truthfulqa_c\|0 | 0|em |0.4222|± |0.0745|
+| | |qem |0.4222|± |0.0745|
+| | |pem |0.4222|± |0.0745|
+| | |pqem |0.5556|± |0.0749|
+
diff --git a/test/2-4k/logs/R1-Qwen2.5-14B.log b/test/2-4k/logs/R1-Qwen2.5-14B.log
new file mode 100644
index 0000000000000000000000000000000000000000..1c69bfa7113b5aca795bb7ecd7d767aaeadd74ca
--- /dev/null
+++ b/test/2-4k/logs/R1-Qwen2.5-14B.log
@@ -0,0 +1,15 @@
+INFO 07-09 16:23:25 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 07-09 16:23:25 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.6586|± |0.0805|
+| | |math_pass@1:1_samples|0.8770|± |0.0506|
+|mm\|aime24\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|arc_challenge\|0| 0|sem |0.7333|± |0.0821|
+|mm\|arc_easy\|0 | 0|sem |0.7097|± |0.0829|
+|mm\|commonsenseqa\|0| 0|sem |0.5818|± |0.0671|
+|mm\|gpqa_diamond\|0 | 2|sem |0.8235|± |0.0953|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.6875|± |0.1197|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9434|± |0.0320|
+|mm\|truthfulqa\|0 | 0|sem |0.4444|± |0.0749|
+
diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2b93395cc7ccd5d54df2cd50371bc81d50065468
--- /dev/null
+++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:427fc6e83ba4bc31ebbf29eac6c70b80a48305446096108c9113ed7eded0c197
+size 34382
diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5239ac581e94d58e02d38ba205788b1e8b372b1f
--- /dev/null
+++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8658a8bf59a4e34a94c81a4cbed3e8aa4c60b6aedc1a9af30d159da711efb00f
+size 27101
diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..499bedda35b528210b8f46b2d7e81dfd8ff8931a
--- /dev/null
+++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a8cd2a2376e28faa234ee87748595ca755ed3499bf630899b9278e7de33d0a3
+size 27350
diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..68cfdcf3261679dd628e9a36e128cb38d21f3529
--- /dev/null
+++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8526e2f8fd2aff0285fa2250dbc1b963a5ae98224d143ec8b7ee4dce3b2ff902
+size 28564
diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet
b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cc472f3402ea120cea40c42985659b07dacfc145 --- /dev/null +++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ede1abca0452b6b7f6c6dc6a0785cfee99b70efa11a8138ddcff5dd9e9ee4384 +size 33694 diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c00f92e93bf23fcd2e505db0cdd65228f91f3b51 --- /dev/null +++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bbf11b20f8583638c22a1cfc49df3029dacbddf49150664f1368f37314f472f +size 83533 diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..709892b83b0046e2412433ce02b1d0dc818aba1e --- /dev/null +++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:310fe2b4f23dedafeb749af5efc8cf1c5512c56f5a9376db20b016e4dd5edab4 +size 2157661 diff --git a/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..869ca85f8218fb8f319c219ffe215157561bc880 --- /dev/null +++ b/test/2-4k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59b90881073d5bbd078d70a590e61924554e78c523084668c045569148c92b40 +size 32382 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2549ca9ede45ec19278f93c9ae49f7daf36c8939 --- /dev/null +++ b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0249c5f76aba8ab71da8aff921493f766ed87d2c495075280f701f92c6a45b73 +size 135313 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..88fd805179945694a248b8af3813f9039ae2170b --- /dev/null +++ b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7abf817dc52707de20a842236821673c2c01f6b94fda539ef5d461f3eb03456 +size 1398027 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..763d313e9b89fbed2c189d7ce219cf90887f4bc2 --- /dev/null +++ b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3149b718d17744a54261f7a314d6d0c9c27423f33c90191eb97816b6a49855f5 +size 1381089 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e72233cba7147cd6e77e03562fcaa722a03e89f2 --- /dev/null +++ b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9426c76b23ba7292ba886b6470b2025c1e5bf9a56dae8a8668f2fef9df4e6e7f +size 2390034 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..63b673f8e0b718271891ad80289448137578dc0f --- /dev/null +++ b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c2728a00ae6d193f56ac72842118c14ada031f6f0009e310b8bf39d61048a45 +size 747913 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1158d0144679bf9d93e7d583223e9c6027a74e90 --- /dev/null +++ b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ce446a1e2f0ea0dd26521634600912062adceb2f5df340e17f8ede545dd150b +size 687375 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..dc9e62351aad36f34d55b970da4f4ddc6c50f8bc --- /dev/null +++ 
b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc72f1b22c05262ad2747d42e0f773075251d2500870932b5f61bcca23d0d739 +size 2210847 diff --git a/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ce4c4f6306007022c43cd892af31e22f4e08782f --- /dev/null +++ b/test/2-4k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66391c65f5b85bde5499bb0415290baa26da557746dd2d11472d885e2108f7c8 +size 1984144 diff --git a/test/2-4k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/test/2-4k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json new file mode 100644 index 0000000000000000000000000000000000000000..bc33f72ee408b1ce68b5369e9291ac2e8cf6b4f4 --- /dev/null +++ b/test/2-4k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json @@ -0,0 +1,89 @@ +{ + "results": { + "mm|arc_easy_c|0": { + "em": 0.9354838709677419, + "em_stderr": 0.04485301852605206, + "qem": 0.9354838709677419, + "qem_stderr": 0.04485301852605206, + "pem": 0.9354838709677419, + "pem_stderr": 0.04485301852605206, + "pqem": 0.9354838709677419, + "pqem_stderr": 0.04485301852605206 + }, + "mm|aime24_c|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond_c|0": { + "em": 0.4117647058823529, + "em_stderr": 0.12303823919618757, + "qem": 0.4117647058823529, + "qem_stderr": 0.12303823919618757, + "pem": 0.4117647058823529, + "pem_stderr": 0.12303823919618757, + "pqem": 0.5294117647058824, + "pqem_stderr": 0.12478354962115545 + }, + "mm|truthfulqa_c|0": { + "em": 0.4222222222222222, + "em_stderr": 0.07446027270295806, + "qem": 0.4222222222222222, + "qem_stderr": 0.07446027270295806, + "pem": 0.4222222222222222, + "pem_stderr": 0.07446027270295806, + "pqem": 0.5555555555555556, + "pqem_stderr": 0.07491109582924914 + }, + "mm|arc_challenge_c|0": { + "em": 0.7, + "em_stderr": 0.0850962943396763, + "qem": 0.7, + "qem_stderr": 0.0850962943396763, + "pem": 0.7, + "pem_stderr": 0.0850962943396763, + "pqem": 0.8333333333333334, + "pqem_stderr": 0.06920456654478328 + }, + "mm|math_500_c|0": { + "math_pass@1:1_samples": 0.2830188679245283, + "math_pass@1:1_samples_stderr": 0.06246832864051477 + }, + "mm|commonsenseqa_c|0": { + "em": 0.7636363636363637, + "em_stderr": 0.05781449705557245, + "qem": 0.7636363636363637, + "qem_stderr": 0.05781449705557245, + "pem": 0.7636363636363637, + "pem_stderr": 0.05781449705557245, + "pqem": 0.7818181818181819, + "pqem_stderr": 0.05620374845754972 + }, + "mm|gsm8k_c|0": { + "math_pass@1:1_samples": 0.4375, + "math_pass@1:1_samples_stderr": 0.128086884574495 + }, + "all": { + "em": 0.6466214325417361, + "em_stderr": 0.07705246436408929, + "qem": 0.6466214325417361, + "qem_stderr": 0.07705246436408929, + "pem": 0.6466214325417361, + "pem_stderr": 0.07705246436408929, + "pqem": 0.727120541276139, + "pqem_stderr": 0.07399119579575794, + "math_pass@1:1_samples": 0.24017295597484276, + "math_pass@1:1_samples_stderr": 0.06351840440500325 + } + }, + "versions": { + 
"mm|aime24_c|0": 3, + "mm|arc_challenge_c|0": 0, + "mm|arc_easy_c|0": 0, + "mm|commonsenseqa_c|0": 0, + "mm|gpqa_diamond_c|0": 1, + "mm|gsm8k_c|0": 0, + "mm|math_500_c|0": 3, + "mm|mmlu_pro_c|0": 0, + "mm|truthfulqa_c|0": 0 + } +} \ No newline at end of file diff --git a/test/2-4k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json b/test/2-4k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..9dfeb5782b971f7d717800031c3d12fdf5d1982b --- /dev/null +++ b/test/2-4k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,63 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.7333333333333333, + "sem_stderr": 0.08211756827352532 + }, + "mm|aime24|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|truthfulqa|0": { + "sem": 0.4444444444444444, + "sem_stderr": 0.07491109582924914 + }, + "mm|arc_easy|0": { + "sem": 0.7096774193548387, + "sem_stderr": 0.08287246824945245 + }, + "mm|commonsenseqa|0": { + "sem": 0.5818181818181818, + "sem_stderr": 0.0671242332357016 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.9433962264150944, + "math_pass@1:1_samples_stderr": 0.0320455764380546 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.6875, + "math_pass@1:1_samples_stderr": 0.11967838846954226 + }, + "mm|gpqa_diamond|0": { + "sem": 0.8235294117647058, + "sem_stderr": 0.09530501027070379 + }, + "all": { + "sem": 0.6585605581431008, + "sem_stderr": 0.08046607517172646, + "math_pass@1:1_samples": 0.8769654088050315, + "math_pass@1:1_samples_stderr": 0.05057465496919896 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 30, + "mm|aime24|0": 3, + "mm|truthfulqa|0": 45, + "mm|arc_easy|0": 31, + "mm|commonsenseqa|0": 55, + "mm|math_500|0": 53, + "mm|gsm8k|0": 16, + "mm|gpqa_diamond|0": 17 + } +} \ No newline at end of file diff --git a/test/4-8k/logs/Qwen2.5-14B.log b/test/4-8k/logs/Qwen2.5-14B.log new file mode 100644 index 0000000000000000000000000000000000000000..7e4b8a7af822ff919bee6e32b565e08a36945754 --- /dev/null +++ b/test/4-8k/logs/Qwen2.5-14B.log @@ -0,0 +1,33 @@ +INFO 07-09 16:26:02 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 16:26:02 [__init__.py:239] Automatically detected platform cuda. 
+| Task |Version| Metric |Value | |Stderr| +|--------------------|------:|---------------------|-----:|---|-----:| +|all | |em |0.6533|± |0.1782| +| | |qem |0.6533|± |0.1782| +| | |pem |0.6533|± |0.1782| +| | |pqem |0.7581|± |0.1111| +| | |math_pass@1:1_samples|0.2469|± |0.0874| +|mm\|aime24_c\|0 | 3|math_pass@1:1_samples|0.0000|± |0.0000| +|mm\|arc_challenge_c\|0| 0|em |0.6667|± |0.3333| +| | |qem |0.6667|± |0.3333| +| | |pem |0.6667|± |0.3333| +| | |pqem |1.0000|± |0.0000| +|mm\|arc_easy_c\|0 | 0|em |1.0000|± |0.0000| +| | |qem |1.0000|± |0.0000| +| | |pem |1.0000|± |0.0000| +| | |pqem |1.0000|± |0.0000| +|mm\|commonsenseqa_c\|0| 0|em |0.6000|± |0.2449| +| | |qem |0.6000|± |0.2449| +| | |pem |0.6000|± |0.2449| +| | |pqem |0.6000|± |0.2449| +|mm\|gpqa_diamond_c\|0 | 1|em |0.4286|± |0.1107| +| | |qem |0.4286|± |0.1107| +| | |pem |0.4286|± |0.1107| +| | |pqem |0.6190|± |0.1086| +|mm\|gsm8k_c\|0 | 0|math_pass@1:1_samples|0.6667|± |0.2108| +|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|0.0741|± |0.0514| +|mm\|truthfulqa_c\|0 | 0|em |0.5714|± |0.2020| +| | |qem |0.5714|± |0.2020| +| | |pem |0.5714|± |0.2020| +| | |pqem |0.5714|± |0.2020| + diff --git a/test/4-8k/logs/R1-Qwen2.5-14B.log b/test/4-8k/logs/R1-Qwen2.5-14B.log new file mode 100644 index 0000000000000000000000000000000000000000..ecda11fe7d49f00d8057d0a76f949520c0010fe3 --- /dev/null +++ b/test/4-8k/logs/R1-Qwen2.5-14B.log @@ -0,0 +1,15 @@ +INFO 07-09 16:23:25 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 16:23:25 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5619|± |0.2272| +| | |math_pass@1:1_samples|0.6617|± |0.1602| +|mm\|aime24\|0 | 3|math_pass@1:1_samples|0.8000|± |0.2000| +|mm\|arc_challenge\|0| 0|sem |0.6667|± |0.3333| +|mm\|arc_easy\|0 | 0|sem |0.4000|± |0.2449| +|mm\|commonsenseqa\|0| 0|sem |0.6000|± |0.2449| +|mm\|gpqa_diamond\|0 | 2|sem |0.5714|± |0.1107| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.3333|± |0.2108| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8519|± |0.0697| +|mm\|truthfulqa\|0 | 0|sem |0.5714|± |0.2020| + diff --git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6111a018ac11eaa46d82c76b83de3401917620fb --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5eeae225c5115f924700e1c4b06cb54656d1d3bcfc13030f5caab822b17344b4 +size 92736 diff --git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7cceb08d8d01188512f2eb2f171e92e182eb4926 --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f5b3313df0ab250f91ed1493ccac41e516209af48b821df42a21afea6cbcfd6 +size 17674 diff 
--git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d4620524a77a841972383985bafb74daa5c54140 --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a56ec69afba28125afee1eb1141c648c39d7dbd81dade1c4f00ef2a6beb554b +size 16158 diff --git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d7da333f7ba531269bb911c29a9efdd4522c3ad9 --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13b15bbc502b6f814a299afb41db67faaa6de045135f3840a2028827f245c41e +size 15925 diff --git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b391c4edd4fc331d2e9ca31b600cdb546b5daef8 --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de81353bc88c2fdcf6463943530937837b3779b2845ed4da4c138e9153916250 +size 36598 diff --git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..06a745deab1921f4ffc0fc44a4d6f1804790d567 --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03d62e5d90503e2da3e87bfdb559b1705cf946a21d56cbe7b481cc9b2b3ba170 +size 48509 diff --git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e8fddebdfc6cf176d5535d3b770127c9e86e3d43 --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5356d6182f20ea01ea522e424d82aa678913805d7f8b87d9f4360a8fb0d20ac9 +size 1558776 diff --git a/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet 
b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1a11dbc59adce1d6cd2959f04e206349a6917507 --- /dev/null +++ b/test/4-8k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:279c295b5fbb13e46673963dc7ac8619bb3414e49469cf8e908623d9ace8570f +size 18611 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2ef95fe9f0410278c984150b03c18f510a3464ed --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:788f62264a626286bbf8bc39430968ee58d6a7582135395b975d111c6b2e73b0 +size 415594 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e5dd0e6ac5764bd2bbc363c4473345afc1df0cdc --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:647b5e920a2213e8a0bee5c88cb031457584a1f45db747fc4a4dd821eb0abd28 +size 255873 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..be1010d9d2ab0ce1da7ec2d1dc5dfbe54cffffee --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbaf534ff826f422eef210096e9e6d47239a93112ade35f6d48f541b510efc66 +size 441620 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e530df62444049d2ff5398213b9076c0c99d4d5 --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fae5cc98f9f5f48fe4d41552bb1d390e92f60d7110a380bdcef1bdec06c5717c +size 407595 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..e8c1311c95921cd6c35513ce8c7b3bfe34f32ae1 --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0f5c19ec9f402952b326993532a331eb31f75aa35f2bb280a711b6389c96f99 +size 1801713 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5fb9985fa878aabd01294933cb1caca3ccf3126e --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19b702031f1986f7a26efc909cfb1c52573222ffc41b1a941a7371ed3f2bb2b6 +size 501370 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4d10daba411594e3849fbba3d2209366f8951ffa --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f950208a9b6d0ae2b92ab8d32acead4f57a09adf619783f6cdaab3149e68c821 +size 1982985 diff --git a/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..240325047ade8189e46b5882540842232af4014f --- /dev/null +++ b/test/4-8k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:962486b6efc31c3ee8e13c52f2791dcd2c5383f61ee30f871c4aa9d9b64ea8e1 +size 620854 diff --git a/test/4-8k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/test/4-8k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json new file mode 100644 index 0000000000000000000000000000000000000000..ad90fa2a8f6950d21ccd6446aa69000955020fef --- /dev/null +++ b/test/4-8k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json @@ -0,0 +1,89 @@ +{ + "results": { + "mm|arc_easy_c|0": { + "em": 1.0, + "em_stderr": 0.0, + "qem": 1.0, + "qem_stderr": 0.0, + "pem": 1.0, + "pem_stderr": 0.0, + "pqem": 1.0, + "pqem_stderr": 0.0 + }, + "mm|aime24_c|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond_c|0": { + "em": 0.42857142857142855, + "em_stderr": 0.11065666703449763, + "qem": 0.42857142857142855, + "qem_stderr": 0.11065666703449763, + "pem": 0.42857142857142855, + "pem_stderr": 0.11065666703449763, + "pqem": 0.6190476190476191, + "pqem_stderr": 0.10858813572372741 + }, + "mm|truthfulqa_c|0": { + "em": 0.5714285714285714, + "em_stderr": 0.20203050891044214, + "qem": 0.5714285714285714, + "qem_stderr": 0.20203050891044214, + "pem": 
0.5714285714285714, + "pem_stderr": 0.20203050891044214, + "pqem": 0.5714285714285714, + "pqem_stderr": 0.20203050891044214 + }, + "mm|arc_challenge_c|0": { + "em": 0.6666666666666666, + "em_stderr": 0.33333333333333337, + "qem": 0.6666666666666666, + "qem_stderr": 0.33333333333333337, + "pem": 0.6666666666666666, + "pem_stderr": 0.33333333333333337, + "pqem": 1.0, + "pqem_stderr": 0.0 + }, + "mm|math_500_c|0": { + "math_pass@1:1_samples": 0.07407407407407407, + "math_pass@1:1_samples_stderr": 0.05136112928011382 + }, + "mm|commonsenseqa_c|0": { + "em": 0.6, + "em_stderr": 0.24494897427831783, + "qem": 0.6, + "qem_stderr": 0.24494897427831783, + "pem": 0.6, + "pem_stderr": 0.24494897427831783, + "pqem": 0.6, + "pqem_stderr": 0.24494897427831783 + }, + "mm|gsm8k_c|0": { + "math_pass@1:1_samples": 0.6666666666666666, + "math_pass@1:1_samples_stderr": 0.210818510677892 + }, + "all": { + "em": 0.6533333333333333, + "em_stderr": 0.1781938967113182, + "qem": 0.6533333333333333, + "qem_stderr": 0.1781938967113182, + "pem": 0.6533333333333333, + "pem_stderr": 0.1781938967113182, + "pqem": 0.7580952380952382, + "pqem_stderr": 0.11111352378249748, + "math_pass@1:1_samples": 0.24691358024691357, + "math_pass@1:1_samples_stderr": 0.08739321331933526 + } + }, + "versions": { + "mm|aime24_c|0": 3, + "mm|arc_challenge_c|0": 0, + "mm|arc_easy_c|0": 0, + "mm|commonsenseqa_c|0": 0, + "mm|gpqa_diamond_c|0": 1, + "mm|gsm8k_c|0": 0, + "mm|math_500_c|0": 3, + "mm|mmlu_pro_c|0": 0, + "mm|truthfulqa_c|0": 0 + } +} \ No newline at end of file diff --git a/test/4-8k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json b/test/4-8k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..20c0f5aa29d6c0cf144297495e0830cb7fb9642f --- /dev/null +++ b/test/4-8k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,63 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.6666666666666666, + "sem_stderr": 0.33333333333333337 + }, + "mm|aime24|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.20000000000000004 + }, + "mm|truthfulqa|0": { + "sem": 0.5714285714285714, + "sem_stderr": 0.20203050891044214 + }, + "mm|arc_easy|0": { + "sem": 0.4, + "sem_stderr": 0.24494897427831783 + }, + "mm|commonsenseqa|0": { + "sem": 0.6, + "sem_stderr": 0.24494897427831783 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8518518518518519, + "math_pass@1:1_samples_stderr": 0.06966962541673782 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.3333333333333333, + "math_pass@1:1_samples_stderr": 0.210818510677892 + }, + "mm|gpqa_diamond|0": { + "sem": 0.5714285714285714, + "sem_stderr": 0.11065666703449763 + }, + "all": { + "sem": 0.5619047619047619, + "sem_stderr": 0.22718369156698176, + "math_pass@1:1_samples": 0.6617283950617284, + "math_pass@1:1_samples_stderr": 0.1601627120315433 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 3, + "mm|aime24|0": 5, + "mm|truthfulqa|0": 7, + "mm|arc_easy|0": 5, + "mm|commonsenseqa|0": 5, + "mm|math_500|0": 27, + "mm|gsm8k|0": 6, + "mm|gpqa_diamond|0": 21 + } +} \ No newline at end of file diff --git a/test/8-16k/logs/Qwen2.5-14B.log b/test/8-16k/logs/Qwen2.5-14B.log new file mode 100644 index 
0000000000000000000000000000000000000000..85280ae3f40bdbfa018a25a75adee152d3e136e1 --- /dev/null +++ b/test/8-16k/logs/Qwen2.5-14B.log @@ -0,0 +1,32 @@ +INFO 07-09 16:25:29 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 16:25:30 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|--------------------|------:|---------------------|-----:|---|-----:| +|all | |em |0.7414|± |0.1092| +| | |qem |0.7414|± |0.1092| +| | |pem |0.7414|± |0.1092| +| | |pqem |0.7596|± |0.1103| +| | |math_pass@1:1_samples|0.0000|± |0.0000| +|mm\|aime24_c\|0 | 3|math_pass@1:1_samples|0.0000|± |0.0000| +|mm\|arc_challenge_c\|0| 0|em |1.0000|± |0.0000| +| | |qem |1.0000|± |0.0000| +| | |pem |1.0000|± |0.0000| +| | |pqem |1.0000|± |0.0000| +|mm\|arc_easy_c\|0 | 0|em |0.8333|± |0.1667| +| | |qem |0.8333|± |0.1667| +| | |pem |0.8333|± |0.1667| +| | |pqem |0.8333|± |0.1667| +|mm\|commonsenseqa_c\|0| 0|em |0.8889|± |0.1111| +| | |qem |0.8889|± |0.1111| +| | |pem |0.8889|± |0.1111| +| | |pqem |0.8889|± |0.1111| +|mm\|gpqa_diamond_c\|0 | 1|em |0.3182|± |0.1016| +| | |qem |0.3182|± |0.1016| +| | |pem |0.3182|± |0.1016| +| | |pqem |0.4091|± |0.1073| +|mm\|math_500_c\|0 | 3|math_pass@1:1_samples|0.0000|± |0.0000| +|mm\|truthfulqa_c\|0 | 0|em |0.6667|± |0.1667| +| | |qem |0.6667|± |0.1667| +| | |pem |0.6667|± |0.1667| +| | |pqem |0.6667|± |0.1667| + diff --git a/test/8-16k/logs/R1-Qwen2.5-14B.log b/test/8-16k/logs/R1-Qwen2.5-14B.log new file mode 100644 index 0000000000000000000000000000000000000000..d23407e0204cd6454f842aa251a88887eaa8dfd1 --- /dev/null +++ b/test/8-16k/logs/R1-Qwen2.5-14B.log @@ -0,0 +1,14 @@ +INFO 07-09 16:23:25 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 07-09 16:23:25 [__init__.py:239] Automatically detected platform cuda. 
+| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5166|± |0.1689| +| | |math_pass@1:1_samples|0.7778|± |0.0878| +|mm\|aime24\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000| +|mm\|arc_challenge\|0| 0|sem |0.8000|± |0.2000| +|mm\|arc_easy\|0 | 0|sem |0.6667|± |0.2108| +|mm\|commonsenseqa\|0| 0|sem |0.5556|± |0.1757| +|mm\|gpqa_diamond\|0 | 2|sem |0.2273|± |0.0914| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.5556|± |0.1757| +|mm\|truthfulqa\|0 | 0|sem |0.3333|± |0.1667| + diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5604f1186a582d726a12059171204e35c6414b13 --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|aime24_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d242262c0c43dc7095dcfec73cd33d957ca6e3b1b3e9d1bd8eee413bee3836a +size 41290 diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..406ceac9dc255e1a417325704e9643d2fffb6fa4 --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_challenge_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d73db9a2f1224acbc7136322012741706097efe581cd3567cb5923df6ac3ad6a +size 15227 diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5bda07b00cd567a22c1319958d78969e05d81f49 --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|arc_easy_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84d4046b86804d685154d905536ae2e38295b26cfe996c026b12ab711f173245 +size 17607 diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..74302dc72a8897661904e13e96a6333f2d3b8abb --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|commonsenseqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0490a4b1e7b454a47a0dfe0fe2136fe859d91db92501ba59c8bbd831002c66af +size 16841 diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet new file mode 
100644 index 0000000000000000000000000000000000000000..3e2f809b19f95a077801430b77f9ce424015fe38 --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gpqa_diamond_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:051d7168ee09403b569d1860d2a4ceecc52a837528dc638d1ab9a83267019db7 +size 44292 diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b6156eb622d8933bdc4f0cfb5a8f51ee99328e4e --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|gsm8k_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73eea6bb87dabfff096940a35ab69aff9ff7e961a00ad4d7fa67d459a4b0c21e +size 9868 diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..49d2476688fea4ab79d68216d2fb87ea3d4b6ae6 --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|math_500_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5483f942c29d31f7bc8f9d4cea68def78e4a71a0aa7e46431c2685fa3987051b +size 1018971 diff --git a/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet new file mode 100644 index 0000000000000000000000000000000000000000..515e5446cf024d998a634135d890a10eec9adebb --- /dev/null +++ b/test/8-16k/outputs/._models_Qwen2.5-14B/2025-07-09T02-35-11.293890/outputs_mm|truthfulqa_c|0_2025-07-09T02-35-11.293890.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b85e8370c85c5eec9c4aaa91238bce0159cea39fe8443aaaaf50f601baa656c +size 19110 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3d26d2e5e346f6943f22e9ad35191275c1ed0daf --- /dev/null +++ b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|aime24|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a047e2aee89e0a3e3b7242baef9b46b57efc277cb038a4ce291d0ee328890336 +size 395292 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..02978e1000ecba446fd197e6c5ddb445eca7c75e --- /dev/null +++ 
b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_challenge|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0b961b272fe91d31848c7cbafef5d0b29c28a3140835c87577c916a98e35dfe +size 950491 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..188f612bcac2a65c779de8b3954e82130baf29cf --- /dev/null +++ b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|arc_easy|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:555476f4406ffdb108926778f303a83edaa90fe5a47c826bcc1d316bd00608ad +size 1191673 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1efc32492b952ab5d77649e8bae65b22f6e618a4 --- /dev/null +++ b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|commonsenseqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bc6074d4aa58636353dcc831577bdf54f4602c292cdc7ca06eefe7f796dc182 +size 1783719 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6cb829af6b17c42ee0934be7be2647a4ff5780ec --- /dev/null +++ b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gpqa_diamond|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8620188be737b491769b81cfc85222c781d99ddd10223825ae1f1290b1b7e012 +size 3858045 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b6156eb622d8933bdc4f0cfb5a8f51ee99328e4e --- /dev/null +++ b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|gsm8k|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73eea6bb87dabfff096940a35ab69aff9ff7e961a00ad4d7fa67d459a4b0c21e +size 9868 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..25b2e3413dd4cb7476fbccfeb0129e12809f8e3d --- /dev/null +++ b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|math_500|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:b99d04cd16e622295ea62bb44b2ba680617135d0dd80102009845be40b04719a +size 1536435 diff --git a/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet new file mode 100644 index 0000000000000000000000000000000000000000..502a5f7558febe217a69803b44b1149686d6f8d5 --- /dev/null +++ b/test/8-16k/outputs/._models_R1-Qwen2.5-14B/2025-07-09T04-07-37.799889/outputs_mm|truthfulqa|0_2025-07-09T04-07-37.799889.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81099867abb992a73b9713d328b170692fd71ffe074ebf68fa63f34b3da11036 +size 1672678 diff --git a/test/8-16k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json b/test/8-16k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json new file mode 100644 index 0000000000000000000000000000000000000000..602efcddf64777e3f1f8c001a780a5dc7ce404e0 --- /dev/null +++ b/test/8-16k/results/._models_Qwen2.5-14B/results_2025-07-09T02-35-11.293890.json @@ -0,0 +1,85 @@ +{ + "results": { + "mm|arc_easy_c|0": { + "em": 0.8333333333333334, + "em_stderr": 0.16666666666666669, + "qem": 0.8333333333333334, + "qem_stderr": 0.16666666666666669, + "pem": 0.8333333333333334, + "pem_stderr": 0.16666666666666669, + "pqem": 0.8333333333333334, + "pqem_stderr": 0.16666666666666669 + }, + "mm|aime24_c|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|gpqa_diamond_c|0": { + "em": 0.3181818181818182, + "em_stderr": 0.10163945352271771, + "qem": 0.3181818181818182, + "qem_stderr": 0.10163945352271771, + "pem": 0.3181818181818182, + "pem_stderr": 0.10163945352271771, + "pqem": 0.4090909090909091, + "pqem_stderr": 0.10729033533674223 + }, + "mm|truthfulqa_c|0": { + "em": 0.6666666666666666, + "em_stderr": 0.16666666666666666, + "qem": 0.6666666666666666, + "qem_stderr": 0.16666666666666666, + "pem": 0.6666666666666666, + "pem_stderr": 0.16666666666666666, + "pqem": 0.6666666666666666, + "pqem_stderr": 0.16666666666666666 + }, + "mm|arc_challenge_c|0": { + "em": 1.0, + "em_stderr": 0.0, + "qem": 1.0, + "qem_stderr": 0.0, + "pem": 1.0, + "pem_stderr": 0.0, + "pqem": 1.0, + "pqem_stderr": 0.0 + }, + "mm|math_500_c|0": { + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|commonsenseqa_c|0": { + "em": 0.8888888888888888, + "em_stderr": 0.11111111111111115, + "qem": 0.8888888888888888, + "qem_stderr": 0.11111111111111115, + "pem": 0.8888888888888888, + "pem_stderr": 0.11111111111111115, + "pqem": 0.8888888888888888, + "pqem_stderr": 0.11111111111111115 + }, + "all": { + "em": 0.7414141414141414, + "em_stderr": 0.10921677959343243, + "qem": 0.7414141414141414, + "qem_stderr": 0.10921677959343243, + "pem": 0.7414141414141414, + "pem_stderr": 0.10921677959343243, + "pqem": 0.7595959595959596, + "pqem_stderr": 0.11034695595623734, + "math_pass@1:1_samples": 0.0, + "math_pass@1:1_samples_stderr": 0.0 + } + }, + "versions": { + "mm|aime24_c|0": 3, + "mm|arc_challenge_c|0": 0, + "mm|arc_easy_c|0": 0, + "mm|commonsenseqa_c|0": 0, + "mm|gpqa_diamond_c|0": 1, + "mm|gsm8k_c|0": 0, + "mm|math_500_c|0": 3, + "mm|mmlu_pro_c|0": 0, + "mm|truthfulqa_c|0": 0 + } +} \ No newline at end of file diff --git a/test/8-16k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json 
b/test/8-16k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json new file mode 100644 index 0000000000000000000000000000000000000000..50164114e1bf02a2edf7bc2764488da4d4c745e0 --- /dev/null +++ b/test/8-16k/results/._models_R1-Qwen2.5-14B/results_2025-07-09T04-07-37.799889.json @@ -0,0 +1,58 @@ +{ + "results": { + "mm|arc_challenge|0": { + "sem": 0.8, + "sem_stderr": 0.19999999999999998 + }, + "mm|aime24|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|truthfulqa|0": { + "sem": 0.3333333333333333, + "sem_stderr": 0.16666666666666666 + }, + "mm|arc_easy|0": { + "sem": 0.6666666666666666, + "sem_stderr": 0.210818510677892 + }, + "mm|commonsenseqa|0": { + "sem": 0.5555555555555556, + "sem_stderr": 0.17568209223157663 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.5555555555555556, + "math_pass@1:1_samples_stderr": 0.17568209223157663 + }, + "mm|gpqa_diamond|0": { + "sem": 0.22727272727272727, + "sem_stderr": 0.09144861547306322 + }, + "all": { + "sem": 0.5165656565656565, + "sem_stderr": 0.1689231770098397, + "math_pass@1:1_samples": 0.7777777777777778, + "math_pass@1:1_samples_stderr": 0.08784104611578832 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|arc_challenge|0": 5, + "mm|aime24|0": 3, + "mm|truthfulqa|0": 9, + "mm|arc_easy|0": 6, + "mm|commonsenseqa|0": 9, + "mm|math_500|0": 9, + "mm|gpqa_diamond|0": 22 + } +} \ No newline at end of file
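Across these runs the per-task Stderr values track the standard error of a sample proportion with Bessel's correction, sqrt(p*(1-p)/(n-1)), where n is the per-bucket sample count recorded under "size" (again inferred by back-checking the stored numbers, e.g. gpqa_diamond just above: p = 5/22 over n = 22 gives 0.09144861547306322). A quick check under that assumption:

```python
import math

def proportion_stderr(p, n):
    """sqrt(p*(1-p)/(n-1)) -- the form consistent with the Stderr columns
    and *_stderr fields in these logs and results files."""
    return math.sqrt(p * (1 - p) / (n - 1))

# 8-16k R1 run above: gpqa_diamond sem = 5/22 with size 22
assert abs(proportion_stderr(5 / 22, 22) - 0.09144861547306322) < 1e-12
# 2-4k R1 run: arc_easy sem = 22/31 with size 31
assert abs(proportion_stderr(22 / 31, 31) - 0.08287246824945245) < 1e-12
```

This also accounts for the 0.0 stderr on saturated buckets (p = 0 or 1, e.g. the 8-16k aime24 pass@1 of 1.0000).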