diff --git a/README.MD b/README.MD new file mode 100644 index 0000000000000000000000000000000000000000..f6ba417a3755b0c0a983c7b6a653772959584644 --- /dev/null +++ b/README.MD @@ -0,0 +1,12 @@ +# Description +`./test/0-1k`, `./merge_bench/` and `./merge_bench1/` share the same eval data. +The data split includes `math_tasks` and `mcq_tasks`: +``` +math_tasks = ["mm|aime24|0", "mm|math_500|0", "mm|gsm8k|0"] +mcq_tasks = ["mm|mmlu_pro|0", "mm|truthfulqa|0", "mm|commonsenseqa|0", "mm|arc_easy|0", "mm|arc_challenge|0", "mm|gpqa_diamond|0"] +``` + +These splits contain only samples whose generation length from the respective reasoning model (e.g. DS-R1-Llama3 or Phi4-mini-reasoning) is < 1k. Currently, however, all samples come from Phi4-mini-reasoning. + +The difference between `./merge_bench/` and `./merge_bench1/` is that `./merge_bench1/` merged all layers of Phi4, while `./merge_bench/` missed `lm_head`. +Note that the Llama series in `./merge_bench/` is still reasonable, since those models were merged with `mergekit`. \ No newline at end of file
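As a side note on how the 0-1k split could be produced, here is a minimal, hypothetical filter sketch. The repo's actual filtering code is not shown; the `generation` field name and token-based length counting are assumptions.

```
# Hypothetical filter sketch -- the "generation" field name and token-based
# length counting are assumptions about how the 0-1k split was produced.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-reasoning")

def keep_short(samples, max_tokens=1024):
    # Keep samples whose reasoning-model generation is shorter than 1k tokens.
    return [s for s in samples if len(tok.encode(s["generation"])) < max_tokens]
```

And to make the `lm_head` gap concrete, here is a minimal sketch of a plain linear merge over two state dicts, again purely illustrative rather than the actual merge script: skipping tensors named `lm_head` mimics `./merge_bench/`, while merging every tensor mimics `./merge_bench1/`.

```
# Hypothetical illustration only -- not the merge code used for these runs.
# skip_lm_head=True mimics ./merge_bench/ (lm_head left unmerged);
# skip_lm_head=False mimics ./merge_bench1/ (every tensor merged).
import torch

def linear_merge(sd_a, sd_b, alpha=0.5, skip_lm_head=False):
    merged = {}
    for name, w_a in sd_a.items():
        if skip_lm_head and name.startswith("lm_head"):
            merged[name] = w_a.clone()  # output head passes through unmerged
        else:
            merged[name] = alpha * w_a + (1 - alpha) * sd_b[name]
    return merged
```

With `skip_lm_head=True`, the merged model keeps one parent's output head verbatim, which is exactly the inconsistency noted above.

diff --git a/merge_bench/logs/llama_darelinear_1.log b/merge_bench/logs/llama_darelinear_1.log new file mode 100644 index 0000000000000000000000000000000000000000..c56d922cfa4880a9556bf1297c88ab441469ad47 --- /dev/null +++ b/merge_bench/logs/llama_darelinear_1.log @@ -0,0 +1,96 @@ +INFO 06-28 18:47:54 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 18:47:56 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 18:48:03 [config.py:717] This model supports multiple tasks: {'classify', 'score', 'reward', 'embed', 'generate'}. Defaulting to 'generate'. +INFO 06-28 18:48:03 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 18:48:03 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. +INFO 06-28 18:48:05 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 18:48:05 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention.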
Set OMP_NUM_THREADS in the external environment to tune this value as needed. +INFO 06-28 18:48:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_06919893'), local_subscribe_addr='ipc:///tmp/d4f9c938-0474-4c85-8776-76fae2cfb900', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 18:48:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 18:48:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_607805b6'), local_subscribe_addr='ipc:///tmp/446ff4e9-7682-40ee-a3fc-0784e08ffb01', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 18:48:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 18:48:05 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0e41e491'), local_subscribe_addr='ipc:///tmp/d8e738b3-c034-45e6-b1c5-2dfc295238ed', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f2d47f78'), local_subscribe_addr='ipc:///tmp/fa2c9b8a-3b1c-4803-b18c-24205bbd5985', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5a2a3f4f'), local_subscribe_addr='ipc:///tmp/730a59de-f5a9-4c2f-a5ea-44ed30623ac6', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:07 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:07 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:07 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:07 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:07 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:07 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:07 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:07 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3598716) WARNING 06-28 18:48:08 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3598717) WARNING 06-28 18:48:08 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. 
To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3598715) WARNING 06-28 18:48:08 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3598714) WARNING 06-28 18:48:08 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_1ad62c91'), local_subscribe_addr='ipc:///tmp/01e2f2dc-b1dd-4a71-b920-27675c6a453e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:08 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:08 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:08 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:08 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:08 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:08 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:08 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:08 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3598716) WARNING 06-28 18:48:08 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3598717) WARNING 06-28 18:48:08 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3598715) WARNING 06-28 18:48:08 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3598714) WARNING 06-28 18:48:08 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:08 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:08 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:08 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:08 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:13 [loader.py:458] Loading weights took 4.51 seconds +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:13 [loader.py:458] Loading weights took 4.51 seconds +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:13 [loader.py:458] Loading weights took 4.52 seconds +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:13 [loader.py:458] Loading weights took 4.51 seconds +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:13 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.901312 seconds +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:13 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.908657 seconds +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:13 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.896791 seconds +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:13 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 4.908974 seconds +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:20 [backends.py:430] Dynamo bytecode transform time: 7.13 s +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:20 [backends.py:430] Dynamo bytecode transform time: 7.13 s +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:20 [backends.py:430] Dynamo bytecode transform time: 7.13 s +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:20 [backends.py:430] Dynamo bytecode transform time: 7.13 s +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.435 s +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.433 s +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.445 s +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.826 s +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:31 [monitor.py:33] torch.compile takes 7.13 s in total +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:31 [monitor.py:33] torch.compile takes 7.13 s in total +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:31 [monitor.py:33] torch.compile takes 7.13 s in total +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:31 [monitor.py:33] torch.compile takes 7.13 s in total +INFO 06-28 18:48:33 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 18:48:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 18:48:33 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 18:48:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 18:48:33 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 18:48:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 18:48:33 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 18:48:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=0 pid=3598714) INFO 06-28 18:48:56 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3598717) INFO 06-28 18:48:56 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3598716) INFO 06-28 18:48:56 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3598715) INFO 06-28 18:48:56 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +INFO 06-28 18:48:56 [core.py:159] init engine (profile, create kv cache, warmup model) took 42.92 seconds +INFO 06-28 18:48:56 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 19:01:31 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 19:01:31 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5201|± |0.0281| +| | |math_pass@1:1_samples|0.7488|± |0.0440| +|mm\|arc_challenge\|0| 0|sem |0.6010|± |0.0251| +|mm\|arc_easy\|0 | 0|sem |0.6304|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.4938|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7226|± |0.0212| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669| +|mm\|truthfulqa\|0 | 0|sem |0.3554|± |0.0437| + diff --git a/merge_bench/logs/llama_darelinear_3.log b/merge_bench/logs/llama_darelinear_3.log new file mode 100644 index 0000000000000000000000000000000000000000..c2c2f26ecbab60f406779bb384d5dac7af18147d --- /dev/null +++ b/merge_bench/logs/llama_darelinear_3.log @@ -0,0 +1,96 @@ +INFO 06-28 19:01:30 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 19:01:32 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 19:01:39 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'reward', 'embed', 'classify'}. Defaulting to 'generate'. +INFO 06-28 19:01:39 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 19:01:39 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 19:01:40 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 19:01:40 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 19:01:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_64032cd1'), local_subscribe_addr='ipc:///tmp/45893f5d-8e26-4aa9-9824-5b019d5989cf', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:01:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 19:01:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4ee5119d'), local_subscribe_addr='ipc:///tmp/6e2ed57a-78ff-4635-b216-0cc45fbb3fd6', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8718327d'), local_subscribe_addr='ipc:///tmp/5be42818-9ba4-4977-b053-4709c5ac33b7', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:01:40 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 19:01:41 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_175f72fa'), local_subscribe_addr='ipc:///tmp/987bf39f-efca-4e1f-a76e-9fd79a0830a7', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_37f6e3ef'), local_subscribe_addr='ipc:///tmp/bc5d6131-6a85-46be-9c6b-7a2502f865ec', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:52 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:52 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:52 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:52 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:52 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:52 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:52 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:52 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3603787) WARNING 06-28 19:01:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3603788) WARNING 06-28 19:01:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3603786) WARNING 06-28 19:01:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3603785) WARNING 06-28 19:01:53 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:53 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_01e6e72a'), local_subscribe_addr='ipc:///tmp/867fb721-a28c-49e2-a558-3ce978a6e3f7', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:53 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:53 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:53 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:53 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:53 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:53 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3603787) WARNING 06-28 19:01:53 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3603788) WARNING 06-28 19:01:53 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:53 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:53 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3603786) WARNING 06-28 19:01:53 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3603785) WARNING 06-28 19:01:53 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:53 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:53 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:53 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:53 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:54 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:54 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:54 [loader.py:458] Loading weights took 0.72 seconds +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:54 [loader.py:458] Loading weights took 0.77 seconds +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:01:54 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.871916 seconds +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:01:54 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.871097 seconds +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:01:54 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.927502 seconds +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:01:54 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.986968 seconds +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:02:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:02:00 [backends.py:430] Dynamo bytecode transform time: 5.67 s +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:02:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:02:00 [backends.py:430] Dynamo bytecode transform time: 5.73 s +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:02:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:02:00 [backends.py:430] Dynamo bytecode transform time: 5.78 s +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:02:00 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:02:00 [backends.py:430] Dynamo bytecode transform time: 5.83 s +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:02:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.400 s +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:02:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.352 s +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:02:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.368 s +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:02:05 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.440 s +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:02:11 [monitor.py:33] torch.compile takes 5.78 s in total +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:02:11 [monitor.py:33] torch.compile takes 5.67 s in total +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:02:11 [monitor.py:33] torch.compile takes 5.73 s in total +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:02:11 [monitor.py:33] torch.compile takes 5.83 s in total +INFO 06-28 19:02:12 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 19:02:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 19:02:12 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:02:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:02:12 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:02:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:02:12 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 19:02:12 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=1 pid=3603786) INFO 06-28 19:02:36 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3603785) INFO 06-28 19:02:36 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3603788) INFO 06-28 19:02:36 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3603787) INFO 06-28 19:02:36 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +INFO 06-28 19:02:36 [core.py:159] init engine (profile, create kv cache, warmup model) took 41.72 seconds +INFO 06-28 19:02:37 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 19:15:24 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 19:15:24 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5004|± |0.0274| +| | |math_pass@1:1_samples|0.8055|± |0.0369| +|mm\|arc_challenge\|0| 0|sem |0.6037|± |0.0251| +|mm\|arc_easy\|0 | 0|sem |0.6315|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.4938|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7360|± |0.0209| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8750|± |0.0530| +|mm\|truthfulqa\|0 | 0|sem |0.2727|± |0.0407| + diff --git a/merge_bench/logs/llama_darelinear_5.log b/merge_bench/logs/llama_darelinear_5.log new file mode 100644 index 0000000000000000000000000000000000000000..f8dfa20472629a02968024c2741f3a49ffac7b13 --- /dev/null +++ b/merge_bench/logs/llama_darelinear_5.log @@ -0,0 +1,96 @@ +INFO 06-28 19:15:23 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 19:15:24 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 19:15:31 [config.py:717] This model supports multiple tasks: {'embed', 'reward', 'classify', 'score', 'generate'}. Defaulting to 'generate'. +INFO 06-28 19:15:31 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 19:15:31 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 19:15:33 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 19:15:33 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 19:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_10577a21'), local_subscribe_addr='ipc:///tmp/004d8b89-cc85-469e-a0a1-eba5bc07a552', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_558e8955'), local_subscribe_addr='ipc:///tmp/f7c82864-a4a8-4c22-8842-dc5a67f67a87', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_27b3cabf'), local_subscribe_addr='ipc:///tmp/b6a9ea73-5188-4e6a-950a-8c81376233d5', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 19:15:33 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c356ca19'), local_subscribe_addr='ipc:///tmp/639cd23e-c9cf-4e49-b6d2-dac211cfea8e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:33 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_db80b59c'), local_subscribe_addr='ipc:///tmp/90eb8b9b-bfd8-42a1-ba6c-6cb01a8bd850', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:35 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:35 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:35 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:35 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:35 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:35 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:35 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:35 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3609849) WARNING 06-28 19:15:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3609848) WARNING 06-28 19:15:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3609846) WARNING 06-28 19:15:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3609847) WARNING 06-28 19:15:36 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_4a6645ac'), local_subscribe_addr='ipc:///tmp/96d20b0c-d027-4c6e-a850-071c00e81e80', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:36 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:36 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:36 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:36 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:36 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:36 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3609849) WARNING 06-28 19:15:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3609848) WARNING 06-28 19:15:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3609846) WARNING 06-28 19:15:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:36 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:36 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:36 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:36 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3609847) WARNING 06-28 19:15:36 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:36 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:36 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:37 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:37 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:37 [loader.py:458] Loading weights took 0.72 seconds +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:37 [loader.py:458] Loading weights took 0.76 seconds +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:37 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.878368 seconds +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:37 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.874506 seconds +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:37 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.975043 seconds +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:37 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.928218 seconds +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:43 [backends.py:430] Dynamo bytecode transform time: 5.53 s +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:43 [backends.py:430] Dynamo bytecode transform time: 5.55 s +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:43 [backends.py:430] Dynamo bytecode transform time: 5.57 s +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:43 [backends.py:430] Dynamo bytecode transform time: 5.58 s +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.393 s +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.397 s +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.404 s +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.454 s +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:15:54 [monitor.py:33] torch.compile takes 5.57 s in total +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:15:54 [monitor.py:33] torch.compile takes 5.55 s in total +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:15:54 [monitor.py:33] torch.compile takes 5.53 s in total +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:15:54 [monitor.py:33] torch.compile takes 5.58 s in total +INFO 06-28 19:15:55 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 19:15:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 19:15:55 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:15:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:15:55 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:15:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:15:55 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 19:15:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3609849) INFO 06-28 19:16:19 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3609848) INFO 06-28 19:16:19 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3609847) INFO 06-28 19:16:19 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3609846) INFO 06-28 19:16:19 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +INFO 06-28 19:16:19 [core.py:159] init engine (profile, create kv cache, warmup model) took 41.19 seconds +INFO 06-28 19:16:19 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 19:28:57 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 19:28:57 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5105|± |0.0280| +| | |math_pass@1:1_samples|0.7999|± |0.0371| +|mm\|arc_challenge\|0| 0|sem |0.5853|± |0.0253| +|mm\|arc_easy\|0 | 0|sem |0.6336|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.4844|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7248|± |0.0211| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8750|± |0.0530| +|mm\|truthfulqa\|0 | 0|sem |0.3388|± |0.0432| + diff --git a/merge_bench/logs/llama_darelinear_7.log b/merge_bench/logs/llama_darelinear_7.log new file mode 100644 index 0000000000000000000000000000000000000000..791cc5c518b1699a1f254214ed1245a4c3685e62 --- /dev/null +++ b/merge_bench/logs/llama_darelinear_7.log @@ -0,0 +1,96 @@ +INFO 06-28 19:28:56 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 19:28:57 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 19:29:04 [config.py:717] This model supports multiple tasks: {'reward', 'score', 'classify', 'generate', 'embed'}. Defaulting to 'generate'. +INFO 06-28 19:29:04 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 19:29:04 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 19:29:06 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 19:29:06 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 19:29:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_26ef43fb'), local_subscribe_addr='ipc:///tmp/1503da60-c19b-48f3-9809-e34d8853a309', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:29:06 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_11981bd0'), local_subscribe_addr='ipc:///tmp/9b1f1f4f-9671-425c-ae4d-18e28195a4bc', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:29:06 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 19:29:06 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2ab12ec2'), local_subscribe_addr='ipc:///tmp/9c3831dc-0da1-47c5-92b2-caa01026898b', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:29:06 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_26bbf412'), local_subscribe_addr='ipc:///tmp/a3d59c06-7d41-4866-ad94-b254fd1dee6e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2e0f0ae9'), local_subscribe_addr='ipc:///tmp/1cbf6108-c340-4068-a696-3ce96130e9fb', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3613747) WARNING 06-28 19:29:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3613746) WARNING 06-28 19:29:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3613742) WARNING 06-28 19:29:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3613743) WARNING 06-28 19:29:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:13 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_1c6a8c3a'), local_subscribe_addr='ipc:///tmp/4e1b6783-859b-428c-a617-d9ff90c87a4f', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:13 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:13 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3613742) WARNING 06-28 19:29:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:13 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:13 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:13 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:13 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:13 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3613747) WARNING 06-28 19:29:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3613746) WARNING 06-28 19:29:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:13 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3613743) WARNING 06-28 19:29:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:14 [loader.py:458] Loading weights took 0.72 seconds +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:14 [loader.py:458] Loading weights took 0.72 seconds +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:14 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:14 [loader.py:458] Loading weights took 0.77 seconds +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:14 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.905328 seconds +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:14 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.900747 seconds +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:14 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.926662 seconds +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:15 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.000593 seconds +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:20 [backends.py:430] Dynamo bytecode transform time: 5.62 s +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:20 [backends.py:430] Dynamo bytecode transform time: 5.68 s +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:21 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:21 [backends.py:430] Dynamo bytecode transform time: 5.85 s +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:21 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:21 [backends.py:430] Dynamo bytecode transform time: 5.92 s +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.372 s +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.360 s +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.434 s +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.386 s +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:31 [monitor.py:33] torch.compile takes 5.68 s in total +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:31 [monitor.py:33] torch.compile takes 5.92 s in total +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:31 [monitor.py:33] torch.compile takes 5.85 s in total +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:31 [monitor.py:33] torch.compile takes 5.62 s in total +INFO 06-28 19:29:33 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 19:29:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 19:29:33 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:29:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:29:33 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:29:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:29:33 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 19:29:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3613747) INFO 06-28 19:29:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3613743) INFO 06-28 19:29:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3613746) INFO 06-28 19:29:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3613742) INFO 06-28 19:29:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 19:29:58 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.73 seconds +INFO 06-28 19:29:59 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 19:42:40 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 19:42:40 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5357|± |0.0281| +| | |math_pass@1:1_samples|0.7499|± |0.0440| +|mm\|arc_challenge\|0| 0|sem |0.5984|± |0.0251| +|mm\|arc_easy\|0 | 0|sem |0.6452|± |0.0156| +|mm\|commonsenseqa\|0| 0|sem |0.5437|± |0.0279| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7248|± |0.0211| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669| +|mm\|truthfulqa\|0 | 0|sem |0.3554|± |0.0437| + diff --git a/merge_bench/logs/llama_darelinear_9.log b/merge_bench/logs/llama_darelinear_9.log new file mode 100644 index 0000000000000000000000000000000000000000..cc19e523f2e9d1cd21818912b8944d8a6c288335 --- /dev/null +++ b/merge_bench/logs/llama_darelinear_9.log @@ -0,0 +1,96 @@ +INFO 06-28 19:42:39 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 19:42:41 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 19:42:48 [config.py:717] This model supports multiple tasks: {'score', 'embed', 'classify', 'generate', 'reward'}. Defaulting to 'generate'. +INFO 06-28 19:42:48 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 19:42:48 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
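Aside: the "Maximum concurrency" figures reported by `kv_cache_utils` in these logs are just the GPU KV-cache token budget divided by the 2,048-token `max_seq_len` of these runs. A quick Python check, with the values copied from the log above:

```
# KV-cache budgets reported above, one per rank; max_seq_len is 2048.
kv_cache_tokens = [2_007_088, 2_006_832, 2_006_832, 2_008_112]
for n in kv_cache_tokens:
    print(f"{n:,} tokens -> {n / 2048:.2f}x concurrency")
# Prints 980.02x, 979.90x, 979.90x, 980.52x, matching the log lines.
```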
+INFO 06-28 19:42:50 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 19:42:50 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
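For readers who want to reproduce one of these runs outside the eval harness, the `core.py:58` config dump above pins down the engine. Below is a minimal sketch using vLLM's Python API with the same settings; the prompt and `max_tokens` are arbitrary examples, and the harness that actually drove these evals is not part of this diff:

```
from vllm import LLM, SamplingParams

# Mirrors the engine config dumped above (illustrative sketch only).
llm = LLM(
    model="./models/R-Phi4",
    dtype="bfloat16",
    max_model_len=2048,
    tensor_parallel_size=4,
    enable_prefix_caching=True,
    # Also silences the repeated PCIe custom-allreduce warning seen above.
    disable_custom_all_reduce=True,
)
out = llm.generate(["2+2="], SamplingParams(max_tokens=16))
print(out[0].outputs[0].text)
```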
+INFO 06-28 19:42:50 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_c90e6d0c'), local_subscribe_addr='ipc:///tmp/966a24d0-22af-4b35-b61c-287d01dabdde', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:42:50 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:50 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6dc61b5f'), local_subscribe_addr='ipc:///tmp/e48c70ef-ba23-4cb5-91df-362ff41efa0d', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:42:50 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:50 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1646518f'), local_subscribe_addr='ipc:///tmp/b6ee7bc0-17bb-4f38-b44b-ec7473a9d4bb', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:42:50 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:50 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5fc7d511'), local_subscribe_addr='ipc:///tmp/dc1587a5-cd34-4d51-9ef8-72e1e473fa0d', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:42:50 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:50 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cceaf416'), local_subscribe_addr='ipc:///tmp/c17485cf-1cc7-433c-9b80-d2e33392d8cd', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:56 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:56 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:56 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:56 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3616477) WARNING 06-28 19:42:57 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3616478) WARNING 06-28 19:42:57 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3616476) WARNING 06-28 19:42:57 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3616475) WARNING 06-28 19:42:57 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:57 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_7d14ff7a'), local_subscribe_addr='ipc:///tmp/54dc6b12-0377-4d94-b6c9-a54dfc6fe0b4', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:57 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:57 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:57 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:57 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:57 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3616477) WARNING 06-28 19:42:57 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:57 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:57 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3616478) WARNING 06-28 19:42:57 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3616476) WARNING 06-28 19:42:57 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:57 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3616475) WARNING 06-28 19:42:57 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:57 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:57 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:57 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:57 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:58 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:58 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:58 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:58 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:42:58 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.868548 seconds +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:42:58 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.867938 seconds +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:42:58 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.942615 seconds +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:42:58 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.920874 seconds +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:43:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:43:04 [backends.py:430] Dynamo bytecode transform time: 5.57 s +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:43:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:43:04 [backends.py:430] Dynamo bytecode transform time: 5.75 s +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:43:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:43:04 [backends.py:430] Dynamo bytecode transform time: 5.90 s +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:43:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:43:04 [backends.py:430] Dynamo bytecode transform time: 6.00 s +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.353 s +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.393 s +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.390 s +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.436 s +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:43:15 [monitor.py:33] torch.compile takes 5.75 s in total +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:43:15 [monitor.py:33] torch.compile takes 5.90 s in total +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:43:15 [monitor.py:33] torch.compile takes 6.00 s in total +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:43:15 [monitor.py:33] torch.compile takes 5.57 s in total +INFO 06-28 19:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 19:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 19:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:43:16 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 19:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3616478) INFO 06-28 19:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3616477) INFO 06-28 19:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3616475) INFO 06-28 19:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3616476) INFO 06-28 19:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 19:43:42 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.87 seconds +INFO 06-28 19:43:43 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 19:56:27 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 19:56:27 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5197|± |0.0280| +| | |math_pass@1:1_samples|0.7193|± |0.0465| +|mm\|arc_challenge\|0| 0|sem |0.5906|± |0.0252| +|mm\|arc_easy\|0 | 0|sem |0.6367|± |0.0156| +|mm\|commonsenseqa\|0| 0|sem |0.5125|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7136|± |0.0214| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7250|± |0.0715| +|mm\|truthfulqa\|0 | 0|sem |0.3388|± |0.0432| + diff --git a/merge_bench/logs/llama_linear_1.log b/merge_bench/logs/llama_linear_1.log new file mode 100644 index 0000000000000000000000000000000000000000..aa0d6989a0cb866f272c6f07d3395e1c8ffd9b8a --- /dev/null +++ b/merge_bench/logs/llama_linear_1.log @@ -0,0 +1,96 @@ +INFO 06-28 19:56:26 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 19:56:27 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 19:56:34 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'classify', 'generate', 'embed'}. Defaulting to 'generate'. +INFO 06-28 19:56:34 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 19:56:34 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
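Each log ends with a lighteval-style markdown results table like the ones above. Here is a minimal sketch for extracting those tables programmatically; the helper name is ours, and it assumes exactly the column layout shown (`Task |Version| Metric |Value | |Stderr`), where pipes inside task names are escaped as `\|`:

```
def parse_results_table(log_text):
    """Return {task: {metric: (value, stderr)}} from a log's results table."""
    results, last_task = {}, None
    for line in log_text.splitlines():
        # Skip non-table lines and the |----|----| separator row.
        if not line.startswith("|") or set(line) <= set("|-: "):
            continue
        # Task names like mm\|gsm8k\|0 escape their pipes; protect them.
        cells = [c.strip() for c in line.replace("\\|", "\x00").strip("|").split("|")]
        if len(cells) < 6 or cells[2] == "Metric":
            continue  # header row
        last_task = cells[0] or last_task  # blank task cell = continuation row
        task = (last_task or "").replace("\x00", "|")
        try:
            results.setdefault(task, {})[cells[2]] = (float(cells[3]), float(cells[5]))
        except ValueError:
            pass
    return results
```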
+INFO 06-28 19:56:36 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 19:56:36 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
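One detail of the config dump worth decoding: the long `cudagraph_capture_sizes` list repeated in every run above follows a simple pattern. The sketch below reproduces the dumped values; it only describes the list, and makes no claim about how vLLM derives it internally:

```
# Multiples of 8 from 512 down to 8, then 4, 2, 1 (67 sizes in total).
capture_sizes = list(range(512, 7, -8)) + [4, 2, 1]
assert len(capture_sizes) == 67
assert capture_sizes[:2] == [512, 504] and capture_sizes[-5:] == [16, 8, 4, 2, 1]
```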
+INFO 06-28 19:56:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e47c5064'), local_subscribe_addr='ipc:///tmp/e6ad432d-f508-4f32-bd1f-0d7c0725974d', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:56:36 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e6b0e3d6'), local_subscribe_addr='ipc:///tmp/f98aba7d-bfc4-4b64-b79d-83126ab2f88c', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:56:36 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 19:56:36 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c6a69884'), local_subscribe_addr='ipc:///tmp/307da573-bf26-4a86-b0d1-2e4f53d94f88', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 19:56:36 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_28462e05'), local_subscribe_addr='ipc:///tmp/8a976985-b5bc-4a59-a4a6-9019466bb558', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:36 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2d722b0e'), local_subscribe_addr='ipc:///tmp/3a2db13c-7d51-4363-8f9e-c42a98ab0208', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:39 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:39 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:39 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:39 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:39 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:39 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:39 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:39 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3618691) WARNING 06-28 19:56:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3618692) WARNING 06-28 19:56:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3618690) WARNING 06-28 19:56:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3618689) WARNING 06-28 19:56:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_6c78750e'), local_subscribe_addr='ipc:///tmp/55c8e627-7a54-4c21-9f63-0cfcbd1725bc', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:40 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3618690) WARNING 06-28 19:56:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:40 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:40 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:40 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3618691) WARNING 06-28 19:56:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3618692) WARNING 06-28 19:56:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3618689) WARNING 06-28 19:56:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:40 [loader.py:458] Loading weights took 0.66 seconds +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:40 [loader.py:458] Loading weights took 0.65 seconds +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:40 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:41 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.840570 seconds +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.842686 seconds +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.982921 seconds +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.901269 seconds +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:47 [backends.py:430] Dynamo bytecode transform time: 5.76 s +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:47 [backends.py:430] Dynamo bytecode transform time: 5.86 s +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:47 [backends.py:430] Dynamo bytecode transform time: 5.94 s +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:47 [backends.py:430] Dynamo bytecode transform time: 5.96 s +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:52 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.336 s +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:52 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.390 s +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:52 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.406 s +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:52 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.512 s +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:56:58 [monitor.py:33] torch.compile takes 5.76 s in total +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:56:58 [monitor.py:33] torch.compile takes 5.96 s in total +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:56:58 [monitor.py:33] torch.compile takes 5.86 s in total +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:56:58 [monitor.py:33] torch.compile takes 5.94 s in total +INFO 06-28 19:56:59 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 19:56:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 19:56:59 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:56:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:56:59 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 19:56:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 19:56:59 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 19:56:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=1 pid=3618690) INFO 06-28 19:57:25 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3618691) INFO 06-28 19:57:25 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3618689) INFO 06-28 19:57:25 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3618692) INFO 06-28 19:57:25 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 19:57:25 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.39 seconds +INFO 06-28 19:57:26 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 20:10:01 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 20:10:01 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5198|± |0.0282| +| | |math_pass@1:1_samples|0.7070|± |0.0467| +|mm\|arc_challenge\|0| 0|sem |0.6037|± |0.0251| +|mm\|arc_easy\|0 | 0|sem |0.6336|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.4781|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.6890|± |0.0219| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7250|± |0.0715| +|mm\|truthfulqa\|0 | 0|sem |0.3636|± |0.0439| + diff --git a/merge_bench/logs/llama_linear_3.log b/merge_bench/logs/llama_linear_3.log new file mode 100644 index 0000000000000000000000000000000000000000..72839875233e934c144de18711f43e9c5aa3c90e --- /dev/null +++ b/merge_bench/logs/llama_linear_3.log @@ -0,0 +1,96 @@ +INFO 06-28 20:10:00 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 20:10:02 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 20:10:09 [config.py:717] This model supports multiple tasks: {'generate', 'reward', 'score', 'embed', 'classify'}. Defaulting to 'generate'. +INFO 06-28 20:10:09 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 20:10:09 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
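Note that every run above reuses the same torch.compile cache directory (`~/.cache/vllm/torch_compile_cache/bc6735f00d/rank_N_0`) even though the merged weights differ, so the cache is apparently keyed on the engine configuration rather than the checkpoint contents (the hash is identical across these different merges). If a stale cache is ever suspected after swapping weights under the same path, deleting it forces a recompile; a sketch, assuming the default cache location shown in the logs:

```
import shutil
from pathlib import Path

# Drop vLLM's compiled-graph cache; the next engine start pays the
# ~10 s Dynamo-transform + compile cost again instead of the ~4.4 s load.
shutil.rmtree(Path.home() / ".cache" / "vllm" / "torch_compile_cache",
              ignore_errors=True)
```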
+INFO 06-28 20:10:11 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 20:10:11 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 20:10:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_6257e474'), local_subscribe_addr='ipc:///tmp/d597435c-2e4d-456f-89db-fcbedcefafc3', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 20:10:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6a207ca3'), local_subscribe_addr='ipc:///tmp/46e20e0c-64bd-473c-b401-bac5e8cadcaa', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 20:10:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0799eacd'), local_subscribe_addr='ipc:///tmp/4c8f80b6-f69b-4626-91a1-b0f3e0c81543', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 20:10:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 20:10:11 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5c611e31'), local_subscribe_addr='ipc:///tmp/0996616a-5d2f-4b7b-aaae-5d95c304730e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:11 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3d01097d'), local_subscribe_addr='ipc:///tmp/77a0146d-a0a9-4b05-86ed-60c0860a40fe', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:13 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:13 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3620671) WARNING 06-28 20:10:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3620672) WARNING 06-28 20:10:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3620670) WARNING 06-28 20:10:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3620669) WARNING 06-28 20:10:14 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_5c739929'), local_subscribe_addr='ipc:///tmp/27185b44-3b72-4a03-bb20-e0fcf7dc7d56', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:14 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:14 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:14 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:14 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3620672) WARNING 06-28 20:10:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3620671) WARNING 06-28 20:10:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:14 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3620670) WARNING 06-28 20:10:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3620669) WARNING 06-28 20:10:14 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:14 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:14 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:14 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:14 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:15 [loader.py:458] Loading weights took 0.66 seconds +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:15 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:15 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:15 [loader.py:458] Loading weights took 0.74 seconds +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:15 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.846524 seconds +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:15 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.885501 seconds +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:15 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.946978 seconds +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:15 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.930972 seconds +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:21 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:21 [backends.py:430] Dynamo bytecode transform time: 5.60 s +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:21 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:21 [backends.py:430] Dynamo bytecode transform time: 5.71 s +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:21 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:21 [backends.py:430] Dynamo bytecode transform time: 5.73 s +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:21 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:21 [backends.py:430] Dynamo bytecode transform time: 5.75 s +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.402 s +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.438 s +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.448 s +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.464 s +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:32 [monitor.py:33] torch.compile takes 5.71 s in total +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:32 [monitor.py:33] torch.compile takes 5.73 s in total +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:32 [monitor.py:33] torch.compile takes 5.60 s in total +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:32 [monitor.py:33] torch.compile takes 5.75 s in total +INFO 06-28 20:10:33 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 20:10:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 20:10:33 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 20:10:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 20:10:33 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 20:10:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 20:10:33 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 20:10:33 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3620672) INFO 06-28 20:10:56 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3620671) INFO 06-28 20:10:56 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3620669) INFO 06-28 20:10:56 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3620670) INFO 06-28 20:10:56 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.96 GiB +INFO 06-28 20:10:56 [core.py:159] init engine (profile, create kv cache, warmup model) took 41.41 seconds +INFO 06-28 20:10:57 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 20:23:36 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 20:23:36 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.4992|± |0.0276| +| | |math_pass@1:1_samples|0.8236|± |0.0343| +|mm\|arc_challenge\|0| 0|sem |0.5879|± |0.0252| +|mm\|arc_easy\|0 | 0|sem |0.6135|± |0.0158| +|mm\|commonsenseqa\|0| 0|sem |0.5062|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7472|± |0.0206| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9000|± |0.0480| +|mm\|truthfulqa\|0 | 0|sem |0.2893|± |0.0414| + diff --git a/merge_bench/logs/llama_linear_5.log b/merge_bench/logs/llama_linear_5.log new file mode 100644 index 0000000000000000000000000000000000000000..e0de136ea82710686e2ecf9fbd9719c23630e561 --- /dev/null +++ b/merge_bench/logs/llama_linear_5.log @@ -0,0 +1,96 @@ +INFO 06-28 20:23:35 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 20:23:36 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 20:23:43 [config.py:717] This model supports multiple tasks: {'score', 'embed', 'reward', 'generate', 'classify'}. Defaulting to 'generate'. +INFO 06-28 20:23:43 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 20:23:43 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
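With a parser like the `parse_results_table` sketch earlier, the aggregate scores scattered across these logs can be compared side by side. A short usage example over the paths added in this diff (hypothetical driver code, not part of the repo):

```
from pathlib import Path

for path in sorted(Path("merge_bench/logs").glob("llama_*.log")):
    res = parse_results_table(path.read_text())
    sem = res.get("all", {}).get("sem", (float("nan"),))[0]
    print(f"{path.name}: all/sem = {sem:.4f}")
```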
+INFO 06-28 20:23:45 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 20:23:45 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 20:23:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_b6685efa'), local_subscribe_addr='ipc:///tmp/299a1d34-2ed2-4341-988b-660b2e51724a', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 20:23:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_919d78a9'), local_subscribe_addr='ipc:///tmp/e31e068f-fcfb-41a8-94de-c14d5b28536d', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 20:23:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6406296c'), local_subscribe_addr='ipc:///tmp/036b4ed3-da30-4873-bf68-9a5a4cdd976b', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 20:23:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 20:23:45 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c676c60a'), local_subscribe_addr='ipc:///tmp/1c4487f2-e80f-4d6a-a055-378524306660', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6cc8c80e'), local_subscribe_addr='ipc:///tmp/27e1dd2c-d6e2-4155-87cb-32e683bab21b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:47 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:47 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:47 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:47 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:47 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:47 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:47 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:47 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3622892) WARNING 06-28 20:23:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3622891) WARNING 06-28 20:23:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3622889) WARNING 06-28 20:23:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3622890) WARNING 06-28 20:23:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_e5cd3014'), local_subscribe_addr='ipc:///tmp/28a2752c-3acb-4fc3-9bbf-2d7f0e4908ab', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:48 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:48 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:48 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:48 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3622892) WARNING 06-28 20:23:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3622891) WARNING 06-28 20:23:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:48 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3622889) WARNING 06-28 20:23:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3622890) WARNING 06-28 20:23:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:49 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:49 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:49 [loader.py:458] Loading weights took 0.71 seconds
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:49 [loader.py:458] Loading weights took 0.74 seconds
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:49 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.870155 seconds
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:49 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.867179 seconds
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:49 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.924282 seconds
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:49 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.974858 seconds
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:55 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:23:55 [backends.py:430] Dynamo bytecode transform time: 5.56 s
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:55 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:23:55 [backends.py:430] Dynamo bytecode transform time: 5.67 s
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:55 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:23:55 [backends.py:430] Dynamo bytecode transform time: 5.76 s
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:55 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:23:55 [backends.py:430] Dynamo bytecode transform time: 5.82 s
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:24:00 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.652 s
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:24:00 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.663 s
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:24:00 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.576 s
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:24:00 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.604 s
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:24:06 [monitor.py:33] torch.compile takes 5.82 s in total
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:24:06 [monitor.py:33] torch.compile takes 5.76 s in total
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:24:06 [monitor.py:33] torch.compile takes 5.67 s in total
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:24:06 [monitor.py:33] torch.compile takes 5.56 s in total
+INFO 06-28 20:24:07 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-28 20:24:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-28 20:24:07 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 20:24:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 20:24:07 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 20:24:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 20:24:07 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-28 20:24:07 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=3 pid=3622892) INFO 06-28 20:24:34 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3622891) INFO 06-28 20:24:34 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3622889) INFO 06-28 20:24:34 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3622890) INFO 06-28 20:24:34 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+INFO 06-28 20:24:34 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.75 seconds
+INFO 06-28 20:24:34 [core_client.py:439] Core engine process 0 ready.
+INFO 06-28 20:37:14 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-28 20:37:14 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5256|± |0.0280|
+| | |math_pass@1:1_samples|0.7443|± |0.0441|
+|mm\|arc_challenge\|0| 0|sem |0.6115|± |0.0250|
+|mm\|arc_easy\|0 | 0|sem |0.6251|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.5188|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7136|± |0.0214|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669|
+|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435|
+
diff --git a/merge_bench/logs/llama_linear_7.log b/merge_bench/logs/llama_linear_7.log
new file mode 100644
index 0000000000000000000000000000000000000000..21f76df10a9324c2fd28414014f914e8c3d6378f
--- /dev/null
+++ b/merge_bench/logs/llama_linear_7.log
@@ -0,0 +1,96 @@
+INFO 06-28 20:37:13 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-28 20:37:14 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-28 20:37:21 [config.py:717] This model supports multiple tasks: {'generate', 'reward', 'embed', 'score', 'classify'}. Defaulting to 'generate'.
+INFO 06-28 20:37:21 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-28 20:37:21 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 20:37:23 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-28 20:37:23 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-28 20:37:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_93b18dc7'), local_subscribe_addr='ipc:///tmp/7537e22e-27ae-4eed-8ba9-8f8926cf4814', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 20:37:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d66e4aef'), local_subscribe_addr='ipc:///tmp/5956dee2-1734-4465-acfb-51251cb047b9', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 20:37:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_56d69c19'), local_subscribe_addr='ipc:///tmp/eee01736-5f87-422d-81df-6f613a8a4f39', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 20:37:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-28 20:37:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b35e06ee'), local_subscribe_addr='ipc:///tmp/70baefd3-b12c-4835-9947-e5bce282520d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9b3261e8'), local_subscribe_addr='ipc:///tmp/5affabae-a840-41f8-95e7-09f8fb3b8037', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3625205) WARNING 06-28 20:37:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3625203) WARNING 06-28 20:37:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3625204) WARNING 06-28 20:37:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3625202) WARNING 06-28 20:37:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:51 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_304efd39'), local_subscribe_addr='ipc:///tmp/4e473a91-f106-49ce-a859-61afa613a874', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:51 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:51 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:51 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:51 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3625205) WARNING 06-28 20:37:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3625204) WARNING 06-28 20:37:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3625203) WARNING 06-28 20:37:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3625202) WARNING 06-28 20:37:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:52 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:52 [loader.py:458] Loading weights took 0.73 seconds
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:52 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:52 [loader.py:458] Loading weights took 0.74 seconds
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.864541 seconds
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.913058 seconds
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.977701 seconds
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.913645 seconds
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:37:58 [backends.py:430] Dynamo bytecode transform time: 5.66 s
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:37:58 [backends.py:430] Dynamo bytecode transform time: 5.72 s
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:37:58 [backends.py:430] Dynamo bytecode transform time: 5.76 s
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:37:58 [backends.py:430] Dynamo bytecode transform time: 5.78 s
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:38:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.423 s
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:38:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.432 s
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:38:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.545 s
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:38:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.650 s
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:38:09 [monitor.py:33] torch.compile takes 5.76 s in total
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:38:09 [monitor.py:33] torch.compile takes 5.66 s in total
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:38:09 [monitor.py:33] torch.compile takes 5.72 s in total
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:38:09 [monitor.py:33] torch.compile takes 5.78 s in total
+INFO 06-28 20:38:10 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-28 20:38:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-28 20:38:10 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 20:38:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 20:38:10 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 20:38:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 20:38:10 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-28 20:38:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=0 pid=3625202) INFO 06-28 20:38:37 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3625205) INFO 06-28 20:38:37 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3625203) INFO 06-28 20:38:37 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3625204) INFO 06-28 20:38:37 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+INFO 06-28 20:38:37 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.85 seconds
+INFO 06-28 20:38:37 [core_client.py:439] Core engine process 0 ready.
+INFO 06-28 20:51:18 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-28 20:51:18 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5158|± |0.0282|
+| | |math_pass@1:1_samples|0.6988|± |0.0481|
+|mm\|arc_challenge\|0| 0|sem |0.5669|± |0.0254|
+|mm\|arc_easy\|0 | 0|sem |0.6209|± |0.0158|
+|mm\|commonsenseqa\|0| 0|sem |0.5281|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7226|± |0.0212|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.6750|± |0.0750|
+|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435|
+
diff --git a/merge_bench/logs/llama_linear_9.log b/merge_bench/logs/llama_linear_9.log
new file mode 100644
index 0000000000000000000000000000000000000000..826e3acab81d57eb8a1c26981c8c9914a6e47c0e
--- /dev/null
+++ b/merge_bench/logs/llama_linear_9.log
@@ -0,0 +1,96 @@
+INFO 06-28 20:51:17 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-28 20:51:18 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-28 20:51:25 [config.py:717] This model supports multiple tasks: {'reward', 'score', 'embed', 'classify', 'generate'}. Defaulting to 'generate'.
+INFO 06-28 20:51:25 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-28 20:51:25 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 20:51:27 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-28 20:51:27 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-28 20:51:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_bb84ef60'), local_subscribe_addr='ipc:///tmp/9f532b5b-5fab-4bd7-a386-12ac4cb074df', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 20:51:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e583dcba'), local_subscribe_addr='ipc:///tmp/bb8329a0-d171-447f-bbb2-4bfaa38bf85a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 20:51:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2e196d70'), local_subscribe_addr='ipc:///tmp/6c61b285-be86-41cb-9cb1-705e39daaba9', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 20:51:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-28 20:51:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4864d8cf'), local_subscribe_addr='ipc:///tmp/79d64628-22b1-4ea9-a2ec-ee94d71df77d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a9de9a12'), local_subscribe_addr='ipc:///tmp/943c88f2-b4dd-40ee-a3e3-13e59c2e57fc', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:30 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:30 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3627171) WARNING 06-28 20:51:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3627172) WARNING 06-28 20:51:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3627170) WARNING 06-28 20:51:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3627169) WARNING 06-28 20:51:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_af3ce749'), local_subscribe_addr='ipc:///tmp/f1cecbba-d83a-49da-abec-058737ee8492', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:30 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:30 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:30 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:30 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3627172) WARNING 06-28 20:51:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3627171) WARNING 06-28 20:51:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:30 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3627169) WARNING 06-28 20:51:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3627170) WARNING 06-28 20:51:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:31 [loader.py:458] Loading weights took 0.67 seconds
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:31 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:31 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:31 [loader.py:458] Loading weights took 0.72 seconds
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.869730 seconds
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.849842 seconds
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:32 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.906712 seconds
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:32 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.951992 seconds
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:37 [backends.py:430] Dynamo bytecode transform time: 5.56 s
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:37 [backends.py:430] Dynamo bytecode transform time: 5.70 s
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:37 [backends.py:430] Dynamo bytecode transform time: 5.72 s
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:38 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:38 [backends.py:430] Dynamo bytecode transform time: 5.98 s
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.382 s
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:43 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.404 s
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:43 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.443 s
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:43 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.471 s
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:51:49 [monitor.py:33] torch.compile takes 5.56 s in total
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:51:49 [monitor.py:33] torch.compile takes 5.70 s in total
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:51:49 [monitor.py:33] torch.compile takes 5.72 s in total
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:51:49 [monitor.py:33] torch.compile takes 5.98 s in total
+INFO 06-28 20:51:50 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-28 20:51:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-28 20:51:50 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 20:51:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 20:51:50 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 20:51:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 20:51:50 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-28 20:51:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=1 pid=3627170) INFO 06-28 20:52:16 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3627171) INFO 06-28 20:52:16 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3627169) INFO 06-28 20:52:16 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3627172) INFO 06-28 20:52:16 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-28 20:52:16 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.15 seconds
+INFO 06-28 20:52:16 [core_client.py:439] Core engine process 0 ready.
+INFO 06-28 21:04:54 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-28 21:04:54 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5069|± |0.0276|
+| | |math_pass@1:1_samples|0.7874|± |0.0392|
+|mm\|arc_challenge\|0| 0|sem |0.6142|± |0.0250|
+|mm\|arc_easy\|0 | 0|sem |0.6283|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.4875|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7248|± |0.0211|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572|
+|mm\|truthfulqa\|0 | 0|sem |0.2975|± |0.0417|
+
diff --git a/merge_bench/logs/llama_ties_1.log b/merge_bench/logs/llama_ties_1.log
new file mode 100644
index 0000000000000000000000000000000000000000..6ac120cdcf87a6779ecd2a7d616e4f28273e2aea
--- /dev/null
+++ b/merge_bench/logs/llama_ties_1.log
@@ -0,0 +1,96 @@
+INFO 06-28 21:04:53 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-28 21:04:55 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-28 21:05:02 [config.py:717] This model supports multiple tasks: {'generate', 'score', 'reward', 'classify', 'embed'}. Defaulting to 'generate'.
+INFO 06-28 21:05:02 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-28 21:05:02 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 21:05:03 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-28 21:05:03 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-28 21:05:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_49535664'), local_subscribe_addr='ipc:///tmp/396eb2be-5260-482c-8468-18f9912dab8c', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 21:05:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_32f083a9'), local_subscribe_addr='ipc:///tmp/4580c9ae-5fa5-4852-a517-8d5b635bf792', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 21:05:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_df472ef4'), local_subscribe_addr='ipc:///tmp/3bfded55-edfb-4719-9266-d163d3b02918', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-28 21:05:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-28 21:05:04 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_478853c5'), local_subscribe_addr='ipc:///tmp/d8b38252-237a-42ce-937a-196da3b5790e', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ae4c4856'), local_subscribe_addr='ipc:///tmp/6ba7770b-4581-4bbb-806f-3a89604afdf9', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:06 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:06 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:06 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:06 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:06 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:06 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:06 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:06 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3629140) WARNING 06-28 21:05:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3629139) WARNING 06-28 21:05:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3629138) WARNING 06-28 21:05:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3629137) WARNING 06-28 21:05:07 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_af396e84'), local_subscribe_addr='ipc:///tmp/5bd0bf44-5b41-424c-9852-44131328ab72', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:07 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:07 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:07 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:07 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:07 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:07 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:07 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3629140) WARNING 06-28 21:05:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3629139) WARNING 06-28 21:05:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3629138) WARNING 06-28 21:05:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:07 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3629137) WARNING 06-28 21:05:07 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:07 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:07 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:07 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:07 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:07 [loader.py:458] Loading weights took 0.67 seconds
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:07 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:08 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:08 [loader.py:458] Loading weights took 0.73 seconds
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.863475 seconds
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.861492 seconds
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.951411 seconds
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:08 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.920031 seconds
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:13 [backends.py:430] Dynamo bytecode transform time: 5.54 s
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:14 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:14 [backends.py:430] Dynamo bytecode transform time: 5.68 s
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:14 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:14 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:14 [backends.py:430] Dynamo bytecode transform time: 5.79 s
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:14 [backends.py:430] Dynamo bytecode transform time: 5.79 s
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.368 s
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:19 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.363 s
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:19 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.341 s
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:19 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.401 s
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:25 [monitor.py:33] torch.compile takes 5.54 s in total
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:25 [monitor.py:33] torch.compile takes 5.68 s in total
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:25 [monitor.py:33] torch.compile takes 5.79 s in total
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:25 [monitor.py:33] torch.compile takes 5.79 s in total
+INFO 06-28 21:05:26 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-28 21:05:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-28 21:05:26 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 21:05:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 21:05:26 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 21:05:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 21:05:26 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-28 21:05:26 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=1 pid=3629138) INFO 06-28 21:05:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3629140) INFO 06-28 21:05:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3629137) INFO 06-28 21:05:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3629139) INFO 06-28 21:05:56 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+INFO 06-28 21:05:56 [core.py:159] init engine (profile, create kv cache, warmup model) took 48.16 seconds
+INFO 06-28 21:05:56 [core_client.py:439] Core engine process 0 ready.
+INFO 06-28 21:18:38 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-28 21:18:38 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5101|± |0.0276|
+| | |math_pass@1:1_samples|0.8100|± |0.0368|
+|mm\|arc_challenge\|0| 0|sem |0.6063|± |0.0251|
+|mm\|arc_easy\|0 | 0|sem |0.6336|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.5031|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7450|± |0.0206|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8750|± |0.0530|
+|mm\|truthfulqa\|0 | 0|sem |0.2975|± |0.0417|
+
diff --git a/merge_bench/logs/llama_ties_3.log b/merge_bench/logs/llama_ties_3.log
new file mode 100644
index 0000000000000000000000000000000000000000..9a5be4373ccbf2bf89481d403cb8a6eca635cea1
--- /dev/null
+++ b/merge_bench/logs/llama_ties_3.log
@@ -0,0 +1,96 @@
+INFO 06-28 21:18:37 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-28 21:18:38 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-28 21:18:45 [config.py:717] This model supports multiple tasks: {'reward', 'embed', 'generate', 'classify', 'score'}. Defaulting to 'generate'.
+INFO 06-28 21:18:45 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-28 21:18:45 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 21:18:47 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 21:18:47 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 21:18:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_397c2c8c'), local_subscribe_addr='ipc:///tmp/9b35e708-cffe-4aa9-be48-15fbf9844d84', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:18:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:18:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3d5102a4'), local_subscribe_addr='ipc:///tmp/3c5e6239-1d7c-4267-9676-9ac237de72d8', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:18:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 21:18:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:18:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e7b03a50'), local_subscribe_addr='ipc:///tmp/f48b8442-bcbe-4798-8547-c0a4ea2f1765', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:18:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:18:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_29183d5c'), local_subscribe_addr='ipc:///tmp/a52d762d-6263-45f1-9161-419a79d4cf72', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:18:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_570fd23d'), local_subscribe_addr='ipc:///tmp/c0c9db08-7e7c-48aa-86fa-0a111f16fdfa', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:18:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:18:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:18:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:18:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:18:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:18:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:18:59 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:18:59 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3631106) WARNING 06-28 21:18:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3631105) WARNING 06-28 21:18:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3631103) WARNING 06-28 21:18:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3631104) WARNING 06-28 21:18:59 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:18:59 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_0515b019'), local_subscribe_addr='ipc:///tmp/b7ff17de-8d57-419c-97f6-014d6d347e56', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:18:59 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:18:59 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:18:59 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:18:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:18:59 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3631105) WARNING 06-28 21:18:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:18:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:18:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:18:59 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3631106) WARNING 06-28 21:18:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3631103) WARNING 06-28 21:18:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3631104) WARNING 06-28 21:18:59 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:18:59 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:18:59 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:18:59 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:18:59 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=2 pid=3631105) INFO 06-28 21:19:00 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:19:00 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:19:00 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:19:00 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:19:00 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.847711 seconds +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:19:00 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.853132 seconds +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:19:00 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.918891 seconds +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:19:01 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.961888 seconds +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:19:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:19:06 [backends.py:430] Dynamo bytecode transform time: 5.62 s +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:19:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:19:06 [backends.py:430] Dynamo bytecode transform time: 5.66 s +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:19:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:19:06 [backends.py:430] Dynamo bytecode transform time: 5.66 s +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:19:06 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:19:06 [backends.py:430] Dynamo bytecode transform time: 5.66 s +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:19:11 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.368 s +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:19:11 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.404 s +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:19:11 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.387 s +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:19:12 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.400 s +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:19:17 [monitor.py:33] torch.compile takes 5.62 s in total +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:19:17 [monitor.py:33] torch.compile takes 5.66 s in total +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:19:17 [monitor.py:33] torch.compile takes 5.66 s in total +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:19:17 [monitor.py:33] torch.compile takes 5.66 s in total +INFO 06-28 21:19:18 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 21:19:18 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 21:19:18 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 21:19:18 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 21:19:18 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 21:19:18 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 21:19:18 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 21:19:18 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3631105) INFO 06-28 21:19:45 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3631106) INFO 06-28 21:19:45 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3631103) INFO 06-28 21:19:45 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3631104) INFO 06-28 21:19:45 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 21:19:45 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.15 seconds +INFO 06-28 21:19:45 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 21:32:27 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 21:32:27 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.4954|± |0.0275| +| | |math_pass@1:1_samples|0.8201|± |0.0365| +|mm\|arc_challenge\|0| 0|sem |0.5774|± |0.0253| +|mm\|arc_easy\|0 | 0|sem |0.6452|± |0.0156| +|mm\|commonsenseqa\|0| 0|sem |0.4781|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7651|± |0.0201| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8750|± |0.0530| +|mm\|truthfulqa\|0 | 0|sem |0.2810|± |0.0410| + diff --git a/merge_bench/logs/llama_ties_5.log b/merge_bench/logs/llama_ties_5.log new file mode 100644 index 0000000000000000000000000000000000000000..c254e66b90746d2d60f2e31a7ba755cdd1c54918 --- /dev/null +++ b/merge_bench/logs/llama_ties_5.log @@ -0,0 +1,96 @@ +INFO 06-28 21:32:26 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 21:32:28 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 21:32:35 [config.py:717] This model supports multiple tasks: {'embed', 'classify', 'reward', 'generate', 'score'}. Defaulting to 'generate'. +INFO 06-28 21:32:35 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 21:32:35 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
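
The "Maximum concurrency" figures in these logs are simply each rank's KV-cache capacity divided by the 2,048-token per-request budget. A one-line sanity check against the numbers reported above:

```
# Reproduce the kv_cache_utils concurrency figures from the logs.
for cache_tokens in (2_007_088, 2_006_832, 2_008_112):
    print(f"{cache_tokens / 2048:.2f}x")   # -> 980.02x, 979.90x, 980.52x
```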
+INFO 06-28 21:32:37 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 21:32:37 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 21:32:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_f77824a9'), local_subscribe_addr='ipc:///tmp/0c065325-9bd3-44ae-b570-badee1d8a29a', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:32:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e1ffa07c'), local_subscribe_addr='ipc:///tmp/188479cc-b446-481e-a6cf-70a39b355001', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:32:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f0f19da6'), local_subscribe_addr='ipc:///tmp/ba9731f7-cc39-4a0e-b49e-d77734e64886', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:32:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 21:32:37 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6669c6e5'), local_subscribe_addr='ipc:///tmp/b31583d8-d023-4014-bf0c-ae58a5bca37a', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9793bc93'), local_subscribe_addr='ipc:///tmp/11e9b52f-d9ea-4a23-b8e3-05261cad20b0', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:40 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:40 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:40 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:40 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:40 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:40 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:40 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:40 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3633070) WARNING 06-28 21:32:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3633071) WARNING 06-28 21:32:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3633069) WARNING 06-28 21:32:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3633072) WARNING 06-28 21:32:40 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:40 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_d275a2db'), local_subscribe_addr='ipc:///tmp/b1872141-efd7-4f10-9c2b-1c8b9c1e6159', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:40 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:40 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:40 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:40 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3633072) WARNING 06-28 21:32:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3633071) WARNING 06-28 21:32:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:40 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3633070) WARNING 06-28 21:32:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3633069) WARNING 06-28 21:32:40 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:40 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
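
The loader lines that follow report about 1.8196 GiB of weights per rank across tensor_parallel_size=4. At 2 bytes per parameter in bf16, that back-solves to roughly a 3.9B-parameter model; this is an inference from the logs, ignoring any small non-sharded buffers:

```
GIB = 2**30
per_rank_gib = 1.8196      # "Model loading took 1.8196 GiB" per worker
ranks = 4                  # tensor_parallel_size from the engine config
total_bytes = per_rank_gib * ranks * GIB
print(f"~{total_bytes / 2 / 1e9:.1f}B params in bf16")   # ~3.9B
```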
+(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:41 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:41 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:41 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:41 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.852514 seconds +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.853413 seconds +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:41 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.973925 seconds +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:42 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.914584 seconds +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:47 [backends.py:430] Dynamo bytecode transform time: 5.87 s +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:47 [backends.py:430] Dynamo bytecode transform time: 5.91 s +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:47 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:47 [backends.py:430] Dynamo bytecode transform time: 5.96 s +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:48 [backends.py:430] Dynamo bytecode transform time: 5.97 s +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:53 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.429 s +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:53 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.414 s +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:53 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.475 s +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:53 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.497 s +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:32:58 [monitor.py:33] torch.compile takes 5.97 s in total +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:32:58 [monitor.py:33] torch.compile takes 5.91 s in total +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:32:58 [monitor.py:33] torch.compile takes 5.87 s in total +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:32:58 [monitor.py:33] torch.compile takes 5.96 s in total +INFO 06-28 21:33:00 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 21:33:00 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 21:33:00 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 21:33:00 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 21:33:00 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 21:33:00 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 21:33:00 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 21:33:00 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3633071) INFO 06-28 21:33:27 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3633072) INFO 06-28 21:33:27 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3633069) INFO 06-28 21:33:27 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3633070) INFO 06-28 21:33:27 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +INFO 06-28 21:33:27 [core.py:159] init engine (profile, create kv cache, warmup model) took 45.17 seconds +INFO 06-28 21:33:27 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 21:46:09 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 21:46:09 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5174|± |0.0277| +| | |math_pass@1:1_samples|0.7736|± |0.0423| +|mm\|arc_challenge\|0| 0|sem |0.6220|± |0.0249| +|mm\|arc_easy\|0 | 0|sem |0.6304|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5031|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7472|± |0.0206| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641| +|mm\|truthfulqa\|0 | 0|sem |0.3140|± |0.0424| + diff --git a/merge_bench/logs/llama_ties_7.log b/merge_bench/logs/llama_ties_7.log new file mode 100644 index 0000000000000000000000000000000000000000..c0f33786eab9ea7fde04f84467338fb9695f45ba --- /dev/null +++ b/merge_bench/logs/llama_ties_7.log @@ -0,0 +1,96 @@ +INFO 06-28 21:46:08 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 21:46:10 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 21:46:17 [config.py:717] This model supports multiple tasks: {'classify', 'reward', 'generate', 'score', 'embed'}. Defaulting to 'generate'. +INFO 06-28 21:46:17 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 21:46:17 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
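
About the `math_pass@1:1_samples` rows in these tables: with a single sample per problem, pass@1 reduces to plain accuracy, but the general unbiased pass@k estimator is the one to keep in mind if runs with more samples are ever compared. A standard sketch (not necessarily this harness's exact implementation):

```
from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k: chance that at least one of k draws from
    n samples (c of them correct) is correct."""
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

assert pass_at_k(n=1, c=1, k=1) == 1.0   # one sample: pass@1 == accuracy
```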
+INFO 06-28 21:46:18 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 21:46:18 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
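
Per the OMP warning just above, vLLM pins Torch CPU parallelism to 1 in its workers and expects the thread count to be tuned via the environment. One way to do that from a launcher script, assuming the value 8 is just a placeholder to tune for your host:

```
import os
# Must be set before torch/OpenMP initialize; the exported variable
# is inherited by vLLM's spawned worker processes.
os.environ.setdefault("OMP_NUM_THREADS", "8")   # illustrative value
```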
+INFO 06-28 21:46:18 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_ba897e64'), local_subscribe_addr='ipc:///tmp/e3fa3541-40e9-45f6-9069-61120d744d93', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:46:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e708e433'), local_subscribe_addr='ipc:///tmp/9b2b5f3c-b689-414b-9c6f-aa51bbbdd8b6', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:46:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a3d2107f'), local_subscribe_addr='ipc:///tmp/49648420-1505-4079-a94a-512c322bc00f', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 21:46:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 21:46:19 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_028e2b27'), local_subscribe_addr='ipc:///tmp/60eb7f5a-ec4e-4d9f-8aa9-28dff9f36b3c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:19 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_5d554696'), local_subscribe_addr='ipc:///tmp/e1da4acd-28b5-4113-8560-1bba01f5f16a', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3635043) WARNING 06-28 21:46:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3635042) WARNING 06-28 21:46:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3635040) WARNING 06-28 21:46:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3635041) WARNING 06-28 21:46:26 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:26 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_e2916777'), local_subscribe_addr='ipc:///tmp/3e96f702-a53e-4867-97a9-d4ef2f1ac5d1', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:26 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:26 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:26 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:26 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3635043) WARNING 06-28 21:46:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:26 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3635042) WARNING 06-28 21:46:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3635040) WARNING 06-28 21:46:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3635041) WARNING 06-28 21:46:26 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:26 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:26 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:26 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:26 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:27 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:27 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:27 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:27 [loader.py:458] Loading weights took 0.72 seconds +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:27 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.871172 seconds +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:27 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.899707 seconds +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:28 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.912291 seconds +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:28 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.940351 seconds +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:33 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:33 [backends.py:430] Dynamo bytecode transform time: 5.50 s +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:33 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:33 [backends.py:430] Dynamo bytecode transform time: 5.60 s +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:33 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:33 [backends.py:430] Dynamo bytecode transform time: 5.61 s +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:33 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:33 [backends.py:430] Dynamo bytecode transform time: 5.65 s +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:38 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.333 s +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:38 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.358 s +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:38 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.400 s +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:38 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.373 s +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:46:44 [monitor.py:33] torch.compile takes 5.50 s in total +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:46:44 [monitor.py:33] torch.compile takes 5.61 s in total +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:46:44 [monitor.py:33] torch.compile takes 5.65 s in total +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:46:44 [monitor.py:33] torch.compile takes 5.60 s in total +INFO 06-28 21:46:45 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 21:46:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 21:46:45 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 21:46:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 21:46:45 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 21:46:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 21:46:45 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 21:46:45 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3635043) INFO 06-28 21:47:10 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3635042) INFO 06-28 21:47:10 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3635040) INFO 06-28 21:47:10 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3635041) INFO 06-28 21:47:10 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +INFO 06-28 21:47:10 [core.py:159] init engine (profile, create kv cache, warmup model) took 42.66 seconds +INFO 06-28 21:47:11 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 21:59:57 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 21:59:57 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5003|± |0.0276| +| | |math_pass@1:1_samples|0.7906|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.5774|± |0.0253| +|mm\|arc_easy\|0 | 0|sem |0.6283|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5062|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7562|± |0.0203| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.2893|± |0.0414| + diff --git a/merge_bench/logs/llama_ties_9.log b/merge_bench/logs/llama_ties_9.log new file mode 100644 index 0000000000000000000000000000000000000000..ceaa16f7d3b1d2cb00a4216367d671391822d3c2 --- /dev/null +++ b/merge_bench/logs/llama_ties_9.log @@ -0,0 +1,96 @@ +INFO 06-28 21:59:56 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 21:59:58 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 22:00:05 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'generate', 'reward', 'classify'}. Defaulting to 'generate'. +INFO 06-28 22:00:05 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 22:00:05 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
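
The `Stderr` values on the `sem` rows behave like the usual standard error of a Bernoulli mean, sqrt(p·(1-p)/n). Back-solving n from, e.g., the arc_easy row above (0.6283 ± 0.0157) suggests roughly 950 scored items for that task; the sample count here is inferred, not read from the repo:

```
from math import sqrt

def sem(p: float, n: int) -> float:
    """Standard error of the mean for a 0/1 score with accuracy p over n items."""
    return sqrt(p * (1.0 - p) / n)

print(round(sem(0.6283, 947), 4))   # ~0.0157, matching the arc_easy row
```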
+INFO 06-28 22:00:07 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 22:00:07 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 22:00:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_072005e2'), local_subscribe_addr='ipc:///tmp/3b8324da-8e55-4477-8b70-faf81399ad67', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 22:00:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_333f2675'), local_subscribe_addr='ipc:///tmp/6023b97e-9a60-41ff-8484-ab9fbab5e5b6', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 22:00:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_133d5830'), local_subscribe_addr='ipc:///tmp/49b7a9c2-4dc6-4fca-ae72-30cc06e6a06a', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 22:00:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 22:00:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_84d0fca4'), local_subscribe_addr='ipc:///tmp/9511444a-afdf-4220-aac2-d0231e605465', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f4fb0ebf'), local_subscribe_addr='ipc:///tmp/7da8aa6f-3ad8-49da-95f2-d8239fe6d553', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:15 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:15 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:15 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:15 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:15 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:15 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:15 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:15 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3637009) WARNING 06-28 22:00:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3637008) WARNING 06-28 22:00:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3637006) WARNING 06-28 22:00:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3637007) WARNING 06-28 22:00:15 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_911522ef'), local_subscribe_addr='ipc:///tmp/c75ce496-9112-4e97-84ea-5fb9f862786a', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:15 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:15 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:15 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:15 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:15 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:15 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:15 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3637008) WARNING 06-28 22:00:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3637009) WARNING 06-28 22:00:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3637006) WARNING 06-28 22:00:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:15 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3637007) WARNING 06-28 22:00:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:15 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:15 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:15 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:15 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:16 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:16 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:16 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:16 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:16 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.855716 seconds +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:17 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.858431 seconds +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:17 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.920326 seconds +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:17 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.960038 seconds +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:22 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:22 [backends.py:430] Dynamo bytecode transform time: 5.70 s +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:22 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:22 [backends.py:430] Dynamo bytecode transform time: 5.72 s +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:23 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:23 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:23 [backends.py:430] Dynamo bytecode transform time: 5.77 s +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:23 [backends.py:430] Dynamo bytecode transform time: 5.77 s +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:28 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.422 s +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:28 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.445 s +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:28 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.425 s +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:28 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.461 s +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:00:33 [monitor.py:33] torch.compile takes 5.77 s in total +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:00:33 [monitor.py:33] torch.compile takes 5.77 s in total +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:00:33 [monitor.py:33] torch.compile takes 5.70 s in total +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:00:33 [monitor.py:33] torch.compile takes 5.72 s in total +INFO 06-28 22:00:34 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 22:00:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 22:00:34 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 22:00:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 22:00:34 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 22:00:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 22:00:34 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 22:00:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3637008) INFO 06-28 22:01:00 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3637009) INFO 06-28 22:01:00 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3637006) INFO 06-28 22:01:00 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3637007) INFO 06-28 22:01:00 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 22:01:00 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.61 seconds +INFO 06-28 22:01:01 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 22:13:43 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 22:13:43 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5212|± |0.0279| +| | |math_pass@1:1_samples|0.7986|± |0.0389| +|mm\|arc_challenge\|0| 0|sem |0.6142|± |0.0250| +|mm\|arc_easy\|0 | 0|sem |0.6399|± |0.0156| +|mm\|commonsenseqa\|0| 0|sem |0.5000|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7472|± |0.0206| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572| +|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429| + diff --git a/merge_bench/logs/phi_darelinear_1.log b/merge_bench/logs/phi_darelinear_1.log new file mode 100644 index 0000000000000000000000000000000000000000..5d2c1e64b4ec02b0eff568d44fff333c49dcb86b --- /dev/null +++ b/merge_bench/logs/phi_darelinear_1.log @@ -0,0 +1,96 @@ +INFO 06-28 01:21:52 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 01:21:54 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 01:22:01 [config.py:717] This model supports multiple tasks: {'reward', 'generate', 'score', 'classify', 'embed'}. Defaulting to 'generate'. +INFO 06-28 01:22:01 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 01:22:01 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
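
For the `ties_*` runs collected above: TIES-merging trims each task vector to its largest-magnitude entries, elects a per-parameter sign by summed mass, and averages only the deltas that agree with the elected sign. A compact per-tensor sketch, with an illustrative density and no claim to match the repo's actual merge configuration:

```
import torch

def ties_merge_tensor(deltas, density=0.5):
    """TIES on one parameter tensor: trim, elect sign, disjoint mean."""
    trimmed = []
    for d in deltas:
        k = max(1, int(density * d.numel()))
        thresh = d.abs().flatten().kthvalue(d.numel() - k + 1).values
        trimmed.append(torch.where(d.abs() >= thresh, d, torch.zeros_like(d)))
    stacked = torch.stack(trimmed)
    sign = torch.sign(stacked.sum(dim=0))            # elect sign by summed mass
    agree = (torch.sign(stacked) == sign) & (stacked != 0)
    count = agree.sum(dim=0).clamp(min=1)
    return (stacked * agree).sum(dim=0) / count      # mean over agreeing deltas
```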
+INFO 06-28 01:22:03 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 01:22:03 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 01:22:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_b2217354'), local_subscribe_addr='ipc:///tmp/a3e8bc96-bab3-4345-8a48-730fe105e3e1', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:22:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3acfa44b'), local_subscribe_addr='ipc:///tmp/4ef91927-c90f-43eb-a030-37c127c3362d', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:22:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 01:22:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 01:22:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b59b84f7'), local_subscribe_addr='ipc:///tmp/a968292a-9ad5-4bce-89ea-6eaff6531d1c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3d01424e'), local_subscribe_addr='ipc:///tmp/1d75b13e-e9fb-472c-a09d-757ce058c078', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1b952b25'), local_subscribe_addr='ipc:///tmp/00d52892-dc3a-4cf8-babd-7ba75c78873b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:05 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:05 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:05 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:05 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:05 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:05 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:05 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:05 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3509493) WARNING 06-28 01:22:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3509490) WARNING 06-28 01:22:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=2 pid=3509492) WARNING 06-28 01:22:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3509491) WARNING 06-28 01:22:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_55349baa'), local_subscribe_addr='ipc:///tmp/c13ba9bd-36a6-4b0c-a6a9-cccb260e6d14', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:06 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:06 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:06 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:06 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:06 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:06 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3509493) WARNING 06-28 01:22:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:06 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3509492) WARNING 06-28 01:22:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:06 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3509490) WARNING 06-28 01:22:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3509491) WARNING 06-28 01:22:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:07 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:07 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:07 [loader.py:458] Loading weights took 0.77 seconds +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:07 [loader.py:458] Loading weights took 0.79 seconds +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.940849 seconds +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.983826 seconds +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.939358 seconds +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.013340 seconds +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:13 [backends.py:430] Dynamo bytecode transform time: 5.83 s +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:13 [backends.py:430] Dynamo bytecode transform time: 5.83 s +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:13 [backends.py:430] Dynamo bytecode transform time: 5.83 s +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:13 [backends.py:430] Dynamo bytecode transform time: 5.87 s +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.394 s +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.398 s +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.453 s +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.430 s +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:24 [monitor.py:33] torch.compile takes 5.83 s in total +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:24 [monitor.py:33] torch.compile takes 5.83 s in total +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:24 [monitor.py:33] torch.compile takes 5.83 s in total +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:24 [monitor.py:33] torch.compile takes 5.87 s in total +INFO 06-28 01:22:25 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 01:22:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 01:22:25 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 01:22:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 01:22:25 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 01:22:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 01:22:25 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 01:22:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=0 pid=3509490) INFO 06-28 01:22:51 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3509491) INFO 06-28 01:22:51 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3509493) INFO 06-28 01:22:51 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3509492) INFO 06-28 01:22:51 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 01:22:51 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.42 seconds +INFO 06-28 01:22:52 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 01:35:24 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 01:35:24 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5259|± |0.0278| +| | |math_pass@1:1_samples|0.7702|± |0.0424| +|mm\|arc_challenge\|0| 0|sem |0.6194|± |0.0249| +|mm\|arc_easy\|0 | 0|sem |0.6367|± |0.0156| +|mm\|commonsenseqa\|0| 0|sem |0.5250|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7405|± |0.0208| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641| +|mm\|truthfulqa\|0 | 0|sem |0.3223|± |0.0427| + diff --git a/merge_bench/logs/phi_darelinear_3.log b/merge_bench/logs/phi_darelinear_3.log new file mode 100644 index 0000000000000000000000000000000000000000..db8263bb1bca8786b1a8f4e6c1286da1d9049bc5 --- /dev/null +++ b/merge_bench/logs/phi_darelinear_3.log @@ -0,0 +1,96 @@ +INFO 06-28 01:35:23 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 01:35:25 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 01:35:32 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'classify', 'embed', 'reward'}. Defaulting to 'generate'. +INFO 06-28 01:35:32 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 01:35:32 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 01:35:34 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 01:35:34 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 01:35:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_f1a88531'), local_subscribe_addr='ipc:///tmp/4b8fdbbe-bfe9-49ea-81e6-583208874c6d', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:35:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_191ebca1'), local_subscribe_addr='ipc:///tmp/b850358a-2e43-4778-a548-506d0ca4be92', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:35:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_dc536c27'), local_subscribe_addr='ipc:///tmp/17308bf4-154b-4d0f-9777-fd0d7e8f6a83', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:35:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 01:35:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3aa9fd19'), local_subscribe_addr='ipc:///tmp/d6edfdbd-538b-4086-8421-706a2dbd4119', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_99ba7d8f'), local_subscribe_addr='ipc:///tmp/714e0361-fdc5-4bb8-b328-ecc573c57fd8', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3512149) WARNING 06-28 01:35:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3512148) WARNING 06-28 01:35:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3512147) WARNING 06-28 01:35:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3512146) WARNING 06-28 01:35:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_431a8311'), local_subscribe_addr='ipc:///tmp/8b3b34f6-f867-4925-b5a0-7cd3f6afe61c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:37 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:37 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:37 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:37 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3512149) WARNING 06-28 01:35:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3512148) WARNING 06-28 01:35:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3512147) WARNING 06-28 01:35:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3512146) WARNING 06-28 01:35:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:38 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:38 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:38 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:38 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.892795 seconds +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.888533 seconds +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.959265 seconds +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.910843 seconds +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:44 [backends.py:430] Dynamo bytecode transform time: 5.58 s +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:44 [backends.py:430] Dynamo bytecode transform time: 5.62 s +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:44 [backends.py:430] Dynamo bytecode transform time: 5.74 s +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:44 [backends.py:430] Dynamo bytecode transform time: 5.80 s +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.361 s +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.393 s +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.462 s +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.444 s +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:35:55 [monitor.py:33] torch.compile takes 5.62 s in total +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:35:55 [monitor.py:33] torch.compile takes 5.74 s in total +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:35:55 [monitor.py:33] torch.compile takes 5.58 s in total +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:35:55 [monitor.py:33] torch.compile takes 5.80 s in total +INFO 06-28 01:35:56 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 01:35:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 01:35:56 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 01:35:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 01:35:56 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 01:35:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 01:35:56 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 01:35:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3512148) INFO 06-28 01:36:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3512149) INFO 06-28 01:36:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3512146) INFO 06-28 01:36:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3512147) INFO 06-28 01:36:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 01:36:22 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.87 seconds +INFO 06-28 01:36:22 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 01:48:58 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 01:48:58 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5141|± |0.0280| +| | |math_pass@1:1_samples|0.7988|± |0.0371| +|mm\|arc_challenge\|0| 0|sem |0.5801|± |0.0253| +|mm\|arc_easy\|0 | 0|sem |0.6209|± |0.0158| +|mm\|commonsenseqa\|0| 0|sem |0.5250|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7226|± |0.0212| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8750|± |0.0530| +|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429| + diff --git a/merge_bench/logs/phi_darelinear_5.log b/merge_bench/logs/phi_darelinear_5.log new file mode 100644 index 0000000000000000000000000000000000000000..497ad74359c0a789b631a53bc71bc5e072c83b78 --- /dev/null +++ b/merge_bench/logs/phi_darelinear_5.log @@ -0,0 +1,96 @@ +INFO 06-28 01:48:57 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 01:48:59 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 01:49:06 [config.py:717] This model supports multiple tasks: {'classify', 'generate', 'score', 'reward', 'embed'}. Defaulting to 'generate'. +INFO 06-28 01:49:06 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 01:49:06 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 01:49:07 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 01:49:07 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 01:49:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e6ed1dc2'), local_subscribe_addr='ipc:///tmp/7eae7c1e-515b-41c5-b887-41e9ee2cf4ea', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:49:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1080e32a'), local_subscribe_addr='ipc:///tmp/7db8a905-31e6-402f-8555-ad1d4729fe0e', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:49:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a26fcf28'), local_subscribe_addr='ipc:///tmp/bd04efc5-4fbe-476e-9a5e-f560a6840f61', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 01:49:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 01:49:07 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_189894f8'), local_subscribe_addr='ipc:///tmp/d29cdad2-23d9-499b-9441-e80446dc5912', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_508e7988'), local_subscribe_addr='ipc:///tmp/96aceb6e-f7bf-41d1-a76c-0c8d028246dc', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:10 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:10 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:10 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:10 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3515448) WARNING 06-28 01:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3515449) WARNING 06-28 01:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3515446) WARNING 06-28 01:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3515447) WARNING 06-28 01:49:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_110906cd'), local_subscribe_addr='ipc:///tmp/bae02c6c-f9ee-49c6-81f8-e4d4afacfdf2', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:11 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:11 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:11 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:11 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:11 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:11 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3515448) WARNING 06-28 01:49:11 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3515449) WARNING 06-28 01:49:11 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3515446) WARNING 06-28 01:49:11 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:11 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:11 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:11 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:11 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3515447) WARNING 06-28 01:49:11 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:11 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:11 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:11 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:11 [loader.py:458] Loading weights took 0.72 seconds +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:11 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:11 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:12 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.911455 seconds +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:12 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.883583 seconds +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:12 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.973162 seconds +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:12 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.938663 seconds +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:17 [backends.py:430] Dynamo bytecode transform time: 5.61 s +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:18 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:18 [backends.py:430] Dynamo bytecode transform time: 5.72 s +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:18 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:18 [backends.py:430] Dynamo bytecode transform time: 5.78 s +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:18 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:18 [backends.py:430] Dynamo bytecode transform time: 5.81 s +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:22 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.387 s +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:23 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.424 s +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:23 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.454 s +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:23 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.450 s +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:28 [monitor.py:33] torch.compile takes 5.78 s in total +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:28 [monitor.py:33] torch.compile takes 5.72 s in total +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:28 [monitor.py:33] torch.compile takes 5.81 s in total +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:28 [monitor.py:33] torch.compile takes 5.61 s in total +INFO 06-28 01:49:30 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 01:49:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 01:49:30 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 01:49:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 01:49:30 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 01:49:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 01:49:30 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 01:49:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=1 pid=3515447) INFO 06-28 01:49:55 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3515449) INFO 06-28 01:49:55 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3515448) INFO 06-28 01:49:55 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3515446) INFO 06-28 01:49:55 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +INFO 06-28 01:49:55 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.09 seconds +INFO 06-28 01:49:55 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 02:02:37 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 02:02:37 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5234|± |0.0280| +| | |math_pass@1:1_samples|0.7316|± |0.0462| +|mm\|arc_challenge\|0| 0|sem |0.6089|± |0.0250| +|mm\|arc_easy\|0 | 0|sem |0.6315|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5062|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7383|± |0.0208| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7250|± |0.0715| +|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435| + diff --git a/merge_bench/logs/phi_darelinear_7.log b/merge_bench/logs/phi_darelinear_7.log new file mode 100644 index 0000000000000000000000000000000000000000..3b7e4ab99fdd57f6506db47eaede54df80d45d43 --- /dev/null +++ b/merge_bench/logs/phi_darelinear_7.log @@ -0,0 +1,96 @@ +INFO 06-28 02:02:36 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 02:02:38 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 02:02:45 [config.py:717] This model supports multiple tasks: {'reward', 'classify', 'score', 'generate', 'embed'}. Defaulting to 'generate'. +INFO 06-28 02:02:45 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 02:02:45 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 02:02:46 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 02:02:46 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 02:02:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e30e996e'), local_subscribe_addr='ipc:///tmp/82a2a047-ddbc-4d57-8204-54b364f14611', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:02:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e04fbba5'), local_subscribe_addr='ipc:///tmp/d2b21b36-91d6-4916-b86d-bd92d2be12f8', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:02:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cf8334ce'), local_subscribe_addr='ipc:///tmp/565158bd-bf30-47f2-b1bf-3d8242588a09', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:02:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 02:02:47 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c3766619'), local_subscribe_addr='ipc:///tmp/25817364-7ca4-4212-b549-8c2349e2cdf9', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c40b7e23'), local_subscribe_addr='ipc:///tmp/d70aee33-5ed7-4dfd-ad56-427d114db39a', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:49 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:49 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:49 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:49 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:49 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:49 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:49 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:49 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3520096) WARNING 06-28 02:02:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3520097) WARNING 06-28 02:02:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3520095) WARNING 06-28 02:02:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3520094) WARNING 06-28 02:02:49 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:49 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_43887bb8'), local_subscribe_addr='ipc:///tmp/9f047318-5779-440e-81ee-e007b90cf083', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:49 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:49 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:49 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:49 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:49 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:49 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3520097) WARNING 06-28 02:02:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3520096) WARNING 06-28 02:02:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:49 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:49 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3520095) WARNING 06-28 02:02:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3520094) WARNING 06-28 02:02:49 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:49 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:49 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:49 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:49 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:50 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:50 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:50 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:50 [loader.py:458] Loading weights took 0.74 seconds +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:51 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.870194 seconds +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:51 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.857993 seconds +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:51 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.926725 seconds +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:51 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.956879 seconds +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:02:56 [backends.py:430] Dynamo bytecode transform time: 5.65 s +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:02:56 [backends.py:430] Dynamo bytecode transform time: 5.67 s +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:57 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:02:57 [backends.py:430] Dynamo bytecode transform time: 5.73 s +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:57 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:02:57 [backends.py:430] Dynamo bytecode transform time: 5.77 s +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:03:02 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.403 s +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:03:02 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.440 s +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:03:02 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.459 s +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:03:02 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.458 s +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:03:07 [monitor.py:33] torch.compile takes 5.77 s in total +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:03:07 [monitor.py:33] torch.compile takes 5.73 s in total +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:03:07 [monitor.py:33] torch.compile takes 5.67 s in total +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:03:07 [monitor.py:33] torch.compile takes 5.65 s in total +INFO 06-28 02:03:09 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 02:03:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 02:03:09 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 02:03:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:03:09 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 02:03:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:03:09 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 02:03:09 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3520096) INFO 06-28 02:03:34 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3520097) INFO 06-28 02:03:34 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3520095) INFO 06-28 02:03:34 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3520094) INFO 06-28 02:03:34 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +INFO 06-28 02:03:34 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.12 seconds +INFO 06-28 02:03:34 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 02:16:05 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 02:16:05 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5132|± |0.0281| +| | |math_pass@1:1_samples|0.7533|± |0.0439| +|mm\|arc_challenge\|0| 0|sem |0.5932|± |0.0252| +|mm\|arc_easy\|0 | 0|sem |0.6220|± |0.0158| +|mm\|commonsenseqa\|0| 0|sem |0.4906|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7315|± |0.0210| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669| +|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435| + diff --git a/merge_bench/logs/phi_darelinear_9.log b/merge_bench/logs/phi_darelinear_9.log new file mode 100644 index 0000000000000000000000000000000000000000..ff971c61091ff0b5126c375d4830b09c098e9804 --- /dev/null +++ b/merge_bench/logs/phi_darelinear_9.log @@ -0,0 +1,96 @@ +INFO 06-28 02:16:04 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 02:16:06 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 02:16:13 [config.py:717] This model supports multiple tasks: {'embed', 'generate', 'reward', 'score', 'classify'}. Defaulting to 'generate'. +INFO 06-28 02:16:13 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 02:16:13 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 02:16:15 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 02:16:15 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 02:16:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_3d0d1676'), local_subscribe_addr='ipc:///tmp/53082177-1424-40f4-ba33-0aea5b7c1554', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:16:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a80e1a15'), local_subscribe_addr='ipc:///tmp/99211d1d-a8ab-458b-ad09-678db2b1d0cc', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:16:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8a177959'), local_subscribe_addr='ipc:///tmp/8a42daca-2ec9-48f4-9764-9e1d8675c552', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:16:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 02:16:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_414db17a'), local_subscribe_addr='ipc:///tmp/1608ff44-ab8e-4556-af45-11be3dbe1b61', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_feaba932'), local_subscribe_addr='ipc:///tmp/c9cc0f59-c525-49dc-8ca9-f743a3e96e03', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3523120) WARNING 06-28 02:16:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3523119) WARNING 06-28 02:16:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3523118) WARNING 06-28 02:16:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3523117) WARNING 06-28 02:16:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:17 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_f636ee5c'), local_subscribe_addr='ipc:///tmp/1bb8e3a6-99a5-43af-97d6-b5e194c11329', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:17 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:17 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:17 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:17 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3523120) WARNING 06-28 02:16:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3523119) WARNING 06-28 02:16:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:17 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3523118) WARNING 06-28 02:16:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3523117) WARNING 06-28 02:16:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:18 [loader.py:458] Loading weights took 0.65 seconds
+(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:18 [loader.py:458] Loading weights took 0.74 seconds
+(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:18 [loader.py:458] Loading weights took 0.71 seconds
+(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:18 [loader.py:458] Loading weights took 0.75 seconds
+(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.934303 seconds
+(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.928185 seconds
+(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.946575 seconds
+(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.890070 seconds
+(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:24 [backends.py:430] Dynamo bytecode transform time: 5.66 s
+(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:24 [backends.py:430] Dynamo bytecode transform time: 5.69 s
+(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:25 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:25 [backends.py:430] Dynamo bytecode transform time: 5.85 s
+(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:25 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:25 [backends.py:430] Dynamo bytecode transform time: 5.88 s
+(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:29 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.407 s
+(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:30 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.415 s
+(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:30 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.419 s
+(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:30 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.421 s
+(VllmWorker rank=2 pid=3523119) INFO 06-28 02:16:36 [monitor.py:33] torch.compile takes 5.66 s in total
+(VllmWorker rank=0 pid=3523117) INFO 06-28 02:16:36 [monitor.py:33] torch.compile takes 5.88 s in total
+(VllmWorker rank=1 pid=3523118) INFO 06-28 02:16:36 [monitor.py:33] torch.compile takes 5.85 s in total
+(VllmWorker rank=3 pid=3523120) INFO 06-28 02:16:36 [monitor.py:33] torch.compile takes 5.69 s in total
+INFO 06-28 02:16:37 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-28 02:16:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-28 02:16:37 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 02:16:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 02:16:37 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 02:16:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-28 02:16:37 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-28 02:16:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3523119) INFO 06-28 02:17:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3523118) INFO 06-28 02:17:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3523120) INFO 06-28 02:17:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3523117) INFO 06-28 02:17:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-28 02:17:03 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.03 seconds
+INFO 06-28 02:17:03 [core_client.py:439] Core engine process 0 ready.
+INFO 06-28 02:29:52 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-28 02:29:52 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5253|± |0.0279|
+| | |math_pass@1:1_samples|0.7848|± |0.0420|
+|mm\|arc_challenge\|0| 0|sem |0.6142|± |0.0250|
+|mm\|arc_easy\|0 | 0|sem |0.6378|± |0.0156|
+|mm\|commonsenseqa\|0| 0|sem |0.5188|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7696|± |0.0199|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641|
+|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429|
+
diff --git a/merge_bench/logs/phi_linear_1.log b/merge_bench/logs/phi_linear_1.log
new file mode 100644
index 0000000000000000000000000000000000000000..a755e5f4bf7ff340a3f8ca7e066f17f071d6529c
--- /dev/null
+++ b/merge_bench/logs/phi_linear_1.log
@@ -0,0 +1,100 @@
+INFO 06-27 02:28:22 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 02:28:24 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 02:28:30 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'generate', 'classify', 'embed'}. Defaulting to 'generate'.
+INFO 06-27 02:28:31 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 02:28:31 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 02:28:32 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 02:28:32 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 02:28:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_64c2b29a'), local_subscribe_addr='ipc:///tmp/4c604289-52fe-4fbf-9aff-29f47c927adc', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:28:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ef238695'), local_subscribe_addr='ipc:///tmp/f8eead83-296e-4a81-beb2-2f42053e6457', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:28:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_41841aee'), local_subscribe_addr='ipc:///tmp/9a6df31c-9333-4ead-9b5d-3f7288cd7b00', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:28:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 02:28:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7c043b9e'), local_subscribe_addr='ipc:///tmp/360601b1-8515-49b4-955a-b18734598b9d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d3023a17'), local_subscribe_addr='ipc:///tmp/82620656-c113-4dc5-bd83-39d18cd8bf2d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3429733) WARNING 06-27 02:28:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3429734) WARNING 06-27 02:28:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3429732) WARNING 06-27 02:28:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3429731) WARNING 06-27 02:28:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_1e3a61da'), local_subscribe_addr='ipc:///tmp/b4f1a266-7798-40fa-82df-2c25b5062d45', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:35 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:35 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:35 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:35 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3429734) WARNING 06-27 02:28:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3429733) WARNING 06-27 02:28:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3429731) WARNING 06-27 02:28:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3429732) WARNING 06-27 02:28:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:36 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:36 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:36 [loader.py:458] Loading weights took 0.67 seconds
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:36 [loader.py:458] Loading weights took 0.72 seconds
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.866745 seconds
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.870209 seconds
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.912511 seconds
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.957005 seconds
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:42 [backends.py:430] Dynamo bytecode transform time: 5.68 s
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:42 [backends.py:430] Dynamo bytecode transform time: 5.73 s
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:42 [backends.py:430] Dynamo bytecode transform time: 5.80 s
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:42 [backends.py:430] Dynamo bytecode transform time: 5.88 s
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:28:46 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:28:46 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:28:46 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:28:47 [backends.py:136] Cache the graph of shape None for later use
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:29:07 [backends.py:148] Compiling a graph for general shape takes 24.70 s
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:29:07 [backends.py:148] Compiling a graph for general shape takes 24.72 s
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:29:07 [backends.py:148] Compiling a graph for general shape takes 24.83 s
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:29:08 [backends.py:148] Compiling a graph for general shape takes 24.88 s
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:29:29 [monitor.py:33] torch.compile takes 30.76 s in total
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:29:29 [monitor.py:33] torch.compile takes 30.63 s in total
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:29:29 [monitor.py:33] torch.compile takes 30.37 s in total
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:29:29 [monitor.py:33] torch.compile takes 30.45 s in total
+INFO 06-27 02:29:31 [kv_cache_utils.py:634] GPU KV cache size: 1,999,536 tokens
+INFO 06-27 02:29:31 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.34x
+INFO 06-27 02:29:31 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
+INFO 06-27 02:29:31 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
+INFO 06-27 02:29:31 [kv_cache_utils.py:634] GPU KV cache size: 1,999,280 tokens
+INFO 06-27 02:29:31 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.21x
+INFO 06-27 02:29:31 [kv_cache_utils.py:634] GPU KV cache size: 2,000,560 tokens
+INFO 06-27 02:29:31 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 976.84x
+(VllmWorker rank=3 pid=3429734) INFO 06-27 02:30:01 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3429733) INFO 06-27 02:30:01 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3429732) INFO 06-27 02:30:01 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3429731) INFO 06-27 02:30:01 [gpu_model_runner.py:1686] Graph capturing finished in 30 secs, took 2.96 GiB
+INFO 06-27 02:30:01 [core.py:159] init engine (profile, create kv cache, warmup model) took 84.99 seconds
+INFO 06-27 02:30:01 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 02:42:44 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 02:42:44 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5197|± |0.0280|
+| | |math_pass@1:1_samples|0.7193|± |0.0465|
+|mm\|arc_challenge\|0| 0|sem |0.5906|± |0.0252|
+|mm\|arc_easy\|0 | 0|sem |0.6367|± |0.0156|
+|mm\|commonsenseqa\|0| 0|sem |0.5125|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7136|± |0.0214|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7250|± |0.0715|
+|mm\|truthfulqa\|0 | 0|sem |0.3388|± |0.0432|
+
diff --git a/merge_bench/logs/phi_linear_2.log b/merge_bench/logs/phi_linear_2.log
new file mode 100644
index 0000000000000000000000000000000000000000..3cdc1aab6f48fc606b2cbbec29151be276d0e744
--- /dev/null
+++ b/merge_bench/logs/phi_linear_2.log
@@ -0,0 +1,96 @@
+INFO 06-27 02:42:43 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 02:42:45 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 02:42:52 [config.py:717] This model supports multiple tasks: {'classify', 'reward', 'generate', 'score', 'embed'}. Defaulting to 'generate'.
+INFO 06-27 02:42:52 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 02:42:52 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 02:42:54 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 02:42:54 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 02:42:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_f75c7f2e'), local_subscribe_addr='ipc:///tmp/128f64f5-39ca-4318-b6d6-702afc25e764', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:42:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8c6a899a'), local_subscribe_addr='ipc:///tmp/bfe1aa7b-3caf-4823-aee3-3b040821364d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:42:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 02:42:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_91075db3'), local_subscribe_addr='ipc:///tmp/fb40d5e0-49c2-414a-b62e-75f1e168124f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:42:54 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_56dad22d'), local_subscribe_addr='ipc:///tmp/bdc6a620-d195-4169-96e6-e1afc95097d1', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:54 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bbd791fe'), local_subscribe_addr='ipc:///tmp/8bc2120a-e48f-4551-be13-deeee6692171', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:56 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:56 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:56 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:56 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:56 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3438994) WARNING 06-27 02:42:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3438993) WARNING 06-27 02:42:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3438992) WARNING 06-27 02:42:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3438991) WARNING 06-27 02:42:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:56 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_835e5bd0'), local_subscribe_addr='ipc:///tmp/53797ae2-24e6-41c5-a70a-50496a13bf1a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:56 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:56 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:56 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3438992) WARNING 06-27 02:42:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:56 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:56 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=3438991) WARNING 06-27 02:42:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:56 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:56 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3438994) WARNING 06-27 02:42:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:56 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3438993) WARNING 06-27 02:42:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:57 [loader.py:458] Loading weights took 0.67 seconds
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:57 [loader.py:458] Loading weights took 0.66 seconds
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:57 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:57 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:42:57 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.881809 seconds
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:42:57 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.906570 seconds
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:42:58 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.904886 seconds
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:42:58 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.923940 seconds
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:43:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:43:03 [backends.py:430] Dynamo bytecode transform time: 5.62 s
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:43:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:43:04 [backends.py:430] Dynamo bytecode transform time: 5.69 s
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:43:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:43:04 [backends.py:430] Dynamo bytecode transform time: 5.76 s
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:43:04 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:43:04 [backends.py:430] Dynamo bytecode transform time: 5.94 s
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.426 s
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.417 s
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.500 s
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:43:09 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.542 s
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:43:15 [monitor.py:33] torch.compile takes 5.69 s in total
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:43:15 [monitor.py:33] torch.compile takes 5.94 s in total
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:43:15 [monitor.py:33] torch.compile takes 5.62 s in total
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:43:15 [monitor.py:33] torch.compile takes 5.76 s in total
+INFO 06-27 02:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-27 02:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-27 02:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 02:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 02:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 02:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 02:43:16 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-27 02:43:16 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=3 pid=3438994) INFO 06-27 02:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3438993) INFO 06-27 02:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3438992) INFO 06-27 02:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3438991) INFO 06-27 02:43:42 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-27 02:43:42 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.34 seconds
+INFO 06-27 02:43:43 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 02:56:22 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 02:56:22 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5228|± |0.0278|
+| | |math_pass@1:1_samples|0.7669|± |0.0425|
+|mm\|arc_challenge\|0| 0|sem |0.6168|± |0.0249|
+|mm\|arc_easy\|0 | 0|sem |0.6241|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.5281|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7338|± |0.0209|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641|
+|mm\|truthfulqa\|0 | 0|sem |0.3223|± |0.0427|
+
diff --git a/merge_bench/logs/phi_linear_3.log b/merge_bench/logs/phi_linear_3.log
new file mode 100644
index 0000000000000000000000000000000000000000..cf870dcda7ea38d5177b1a5735382560d10d5786
--- /dev/null
+++ b/merge_bench/logs/phi_linear_3.log
@@ -0,0 +1,96 @@
+INFO 06-27 02:56:21 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 02:56:23 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 02:56:30 [config.py:717] This model supports multiple tasks: {'classify', 'generate', 'reward', 'embed', 'score'}. Defaulting to 'generate'.
+INFO 06-27 02:56:30 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 02:56:30 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 02:56:32 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 02:56:32 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 02:56:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_033fa3da'), local_subscribe_addr='ipc:///tmp/a1051549-2dec-4688-9494-0718440d0c2a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:56:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6383bb93'), local_subscribe_addr='ipc:///tmp/94832504-1a54-45c4-a0b8-da03a934d306', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:56:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4b8e0cc5'), local_subscribe_addr='ipc:///tmp/9db3c5c5-8593-4669-b9ec-1b2dfcd93261', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 02:56:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 02:56:32 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b6e0d56f'), local_subscribe_addr='ipc:///tmp/3ebc8f6d-94f2-4041-b265-1596b1a7e5c3', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:32 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_43b80aeb'), local_subscribe_addr='ipc:///tmp/33b51447-7fe1-46a4-a147-ba820a37718b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:34 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:34 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3447778) WARNING 06-27 02:56:34 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3447776) WARNING 06-27 02:56:34 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3447772) WARNING 06-27 02:56:34 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3447773) WARNING 06-27 02:56:34 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_4f17e707'), local_subscribe_addr='ipc:///tmp/cc1369ad-3d5f-4a77-a9bf-bfb609385bef', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:34 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:34 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:34 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:34 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:34 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:34 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3447776) WARNING 06-27 02:56:34 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3447778) WARNING 06-27 02:56:34 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:34 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:34 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3447772) WARNING 06-27 02:56:34 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3447773) WARNING 06-27 02:56:34 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:34 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:34 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:34 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:34 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:35 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:35 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:35 [loader.py:458] Loading weights took 0.67 seconds
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:35 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.878122 seconds
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.878944 seconds
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.937358 seconds
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.907204 seconds
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:41 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:41 [backends.py:430] Dynamo bytecode transform time: 5.68 s
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:42 [backends.py:430] Dynamo bytecode transform time: 5.80 s
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:42 [backends.py:430] Dynamo bytecode transform time: 5.82 s
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:42 [backends.py:430] Dynamo bytecode transform time: 5.88 s
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.405 s
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.381 s
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.368 s
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:47 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.486 s
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:56:52 [monitor.py:33] torch.compile takes 5.82 s in total
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:56:52 [monitor.py:33] torch.compile takes 5.68 s in total
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:56:52 [monitor.py:33] torch.compile takes 5.80 s in total
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:56:52 [monitor.py:33] torch.compile takes 5.88 s in total
+INFO 06-27 02:56:54 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-27 02:56:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-27 02:56:54 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 02:56:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 02:56:54 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 02:56:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 02:56:54 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-27 02:56:54 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=3 pid=3447778) INFO 06-27 02:57:20 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3447776) INFO 06-27 02:57:20 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3447772) INFO 06-27 02:57:20 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3447773) INFO 06-27 02:57:20 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-27 02:57:20 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.10 seconds
+INFO 06-27 02:57:20 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 03:10:01 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 03:10:01 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5275|± |0.0280|
+| | |math_pass@1:1_samples|0.7814|± |0.0421|
+|mm\|arc_challenge\|0| 0|sem |0.6115|± |0.0250|
+|mm\|arc_easy\|0 | 0|sem |0.6315|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.5281|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7629|± |0.0201|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641|
+|mm\|truthfulqa\|0 | 0|sem |0.3388|± |0.0432|
+
diff --git a/merge_bench/logs/phi_linear_4.log b/merge_bench/logs/phi_linear_4.log
new file mode 100644
index 0000000000000000000000000000000000000000..ba712336237880d720a6295cdfd5e2e0b8e98d21
--- /dev/null
+++ b/merge_bench/logs/phi_linear_4.log
@@ -0,0 +1,96 @@
+INFO 06-27 03:10:00 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 03:10:02 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 03:10:08 [config.py:717] This model supports multiple tasks: {'embed', 'classify', 'score', 'generate', 'reward'}. Defaulting to 'generate'.
+INFO 06-27 03:10:09 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 03:10:09 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 03:10:10 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 03:10:10 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 03:10:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_8f74b1a6'), local_subscribe_addr='ipc:///tmp/d6a3a804-af20-451f-bbde-88cafad6d09e', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:10:10 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b833bb60'), local_subscribe_addr='ipc:///tmp/9b12d81b-f5a0-4a07-abff-2a89c6e7efc3', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:10:10 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_df1b06e4'), local_subscribe_addr='ipc:///tmp/5fbbbe13-7f22-462d-806c-b109618e0050', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:10:10 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 03:10:10 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_fd88f513'), local_subscribe_addr='ipc:///tmp/5519fa92-0b12-4661-87d3-60cc8720f215', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b47742a0'), local_subscribe_addr='ipc:///tmp/df1ac424-4f25-4f30-8aa8-4fa48b068a28', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:12 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:12 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:12 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:12 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:12 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:12 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:12 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:12 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3455003) WARNING 06-27 03:10:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3455001) WARNING 06-27 03:10:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3455004) WARNING 06-27 03:10:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3455002) WARNING 06-27 03:10:13 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:13 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_c686f274'), local_subscribe_addr='ipc:///tmp/6acb8797-fe95-4e20-b79c-c3db8cabe649', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:13 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:13 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:13 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:13 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:13 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:13 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:13 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:13 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3455003) WARNING 06-27 03:10:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3455004) WARNING 06-27 03:10:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3455001) WARNING 06-27 03:10:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3455002) WARNING 06-27 03:10:13 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:13 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:14 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:14 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:14 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:14 [loader.py:458] Loading weights took 0.72 seconds
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:14 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.892547 seconds
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:14 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.893978 seconds
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:14 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.888170 seconds
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:15 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.959422 seconds
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:20 [backends.py:430] Dynamo bytecode transform time: 5.52 s
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:20 [backends.py:430] Dynamo bytecode transform time: 5.59 s
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:20 [backends.py:430] Dynamo bytecode transform time: 5.64 s
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:20 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:20 [backends.py:430] Dynamo bytecode transform time: 5.79 s
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.333 s
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.369 s
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:25 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.354 s
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:26 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.428 s
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:31 [monitor.py:33] torch.compile takes 5.59 s in total
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:31 [monitor.py:33] torch.compile takes 5.52 s in total
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:31 [monitor.py:33] torch.compile takes 5.79 s in total
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:31 [monitor.py:33] torch.compile takes 5.64 s in total
+INFO 06-27 03:10:32 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-27 03:10:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-27 03:10:32 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:10:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:10:32 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:10:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:10:32 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-27 03:10:32 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3455003) INFO 06-27 03:10:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3455001) INFO 06-27 03:10:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3455004) INFO 06-27 03:10:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3455002) INFO 06-27 03:10:58 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-27 03:10:59 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.01 seconds
+INFO 06-27 03:10:59 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 03:23:34 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 03:23:34 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5022|± |0.0273|
+| | |math_pass@1:1_samples|0.7850|± |0.0407|
+|mm\|arc_challenge\|0| 0|sem |0.6142|± |0.0250|
+|mm\|arc_easy\|0 | 0|sem |0.6283|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.4938|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7450|± |0.0206|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608|
+|mm\|truthfulqa\|0 | 0|sem |0.2727|± |0.0407|
+
diff --git a/merge_bench/logs/phi_linear_5.log b/merge_bench/logs/phi_linear_5.log
new file mode 100644
index 0000000000000000000000000000000000000000..fab308a3093aaecc7fce4930459adbb2a1cf54dc
--- /dev/null
+++ b/merge_bench/logs/phi_linear_5.log
@@ -0,0 +1,96 @@
+INFO 06-27 03:23:33 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 03:23:35 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 03:23:42 [config.py:717] This model supports multiple tasks: {'score', 'embed', 'reward', 'generate', 'classify'}. Defaulting to 'generate'.
+INFO 06-27 03:23:42 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 03:23:42 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 03:23:43 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 03:23:43 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 03:23:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_f203064f'), local_subscribe_addr='ipc:///tmp/580126a8-c433-4576-8303-f43bfd9ef804', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:23:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_22ad3f81'), local_subscribe_addr='ipc:///tmp/17f69624-7605-41fa-9a3c-7ef518f34993', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:23:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_48bc76d6'), local_subscribe_addr='ipc:///tmp/d0301c4f-b794-4755-a83d-b48f0a57b58d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:23:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 03:23:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_32db5fc4'), local_subscribe_addr='ipc:///tmp/8781eb5d-4016-45ed-85cf-f2e24cd0b7f0', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_34e3bba9'), local_subscribe_addr='ipc:///tmp/f7be10cc-06a3-46b4-ba2f-e37bbc8351e1', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3457338) WARNING 06-27 03:23:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3457339) WARNING 06-27 03:23:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3457335) WARNING 06-27 03:23:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3457333) WARNING 06-27 03:23:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_77f60c8a'), local_subscribe_addr='ipc:///tmp/7bc40d89-5a96-45a9-b7ee-9c97d05ab0cb', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:46 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:46 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:46 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3457335) WARNING 06-27 03:23:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3457338) WARNING 06-27 03:23:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3457333) WARNING 06-27 03:23:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:46 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3457339) WARNING 06-27 03:23:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:47 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:47 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:47 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:47 [loader.py:458] Loading weights took 0.73 seconds
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.863905 seconds
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.873327 seconds
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.905206 seconds
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.967568 seconds
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:53 [backends.py:430] Dynamo bytecode transform time: 5.66 s
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:53 [backends.py:430] Dynamo bytecode transform time: 5.68 s
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:53 [backends.py:430] Dynamo bytecode transform time: 5.69 s
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:53 [backends.py:430] Dynamo bytecode transform time: 5.75 s
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:23:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.403 s
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:23:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.397 s
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:23:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.397 s
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:23:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.404 s
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:24:04 [monitor.py:33] torch.compile takes 5.66 s in total
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:24:04 [monitor.py:33] torch.compile takes 5.68 s in total
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:24:04 [monitor.py:33] torch.compile takes 5.75 s in total
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:24:04 [monitor.py:33] torch.compile takes 5.69 s in total
+INFO 06-27 03:24:05 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-27 03:24:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-27 03:24:05 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:24:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:24:05 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:24:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:24:05 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-27 03:24:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3457338) INFO 06-27 03:24:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3457339) INFO 06-27 03:24:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3457335) INFO 06-27 03:24:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3457333) INFO 06-27 03:24:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-27 03:24:31 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.73 seconds
+INFO 06-27 03:24:31 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 03:37:15 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 03:37:15 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5198|± |0.0283|
+| | |math_pass@1:1_samples|0.7397|± |0.0452|
+|mm\|arc_challenge\|0| 0|sem |0.5748|± |0.0254|
+|mm\|arc_easy\|0 | 0|sem |0.6220|± |0.0158|
+|mm\|commonsenseqa\|0| 0|sem |0.5188|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7293|± |0.0210|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7500|± |0.0693|
+|mm\|truthfulqa\|0 | 0|sem |0.3636|± |0.0439|
+
diff --git a/merge_bench/logs/phi_linear_6.log b/merge_bench/logs/phi_linear_6.log
new file mode 100644
index 0000000000000000000000000000000000000000..d264d217d55f1d2783a32fb624003bf98c31c1af
--- /dev/null
+++ b/merge_bench/logs/phi_linear_6.log
@@ -0,0 +1,96 @@
+INFO 06-27 03:37:14 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 03:37:16 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 03:37:23 [config.py:717] This model supports multiple tasks: {'score', 'classify', 'generate', 'reward', 'embed'}. Defaulting to 'generate'.
+INFO 06-27 03:37:23 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 03:37:23 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 03:37:25 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 03:37:25 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 03:37:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_580fe053'), local_subscribe_addr='ipc:///tmp/75627ddd-1278-43f9-9357-73bdfd261eb0', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:37:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_38aa069a'), local_subscribe_addr='ipc:///tmp/27c05aef-fc8a-4305-9b84-771d29610914', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:37:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a6e9872f'), local_subscribe_addr='ipc:///tmp/40a3df37-ef3f-4f32-8162-8a898c97b508', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:37:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 03:37:25 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_738e08a9'), local_subscribe_addr='ipc:///tmp/af4d4034-226e-4702-9021-b27ebe87aab7', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_21fb3f2e'), local_subscribe_addr='ipc:///tmp/fbbf66d1-93ad-4810-be1a-c28e80ccb952', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:27 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:27 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:27 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:27 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:27 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:27 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:27 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:27 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3459154) WARNING 06-27 03:37:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3459152) WARNING 06-27 03:37:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3459150) WARNING 06-27 03:37:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3459149) WARNING 06-27 03:37:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_ff219e32'), local_subscribe_addr='ipc:///tmp/f13b016b-7c21-4dec-9987-b15d524f465b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:27 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:27 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:27 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:27 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3459152) WARNING 06-27 03:37:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3459154) WARNING 06-27 03:37:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3459150) WARNING 06-27 03:37:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3459149) WARNING 06-27 03:37:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:28 [loader.py:458] Loading weights took 0.67 seconds
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:28 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:28 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:28 [loader.py:458] Loading weights took 0.73 seconds
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:28 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.868632 seconds
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:29 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.862858 seconds
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:29 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.945122 seconds
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:29 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.898845 seconds
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:34 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:34 [backends.py:430] Dynamo bytecode transform time: 5.62 s
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:34 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:34 [backends.py:430] Dynamo bytecode transform time: 5.63 s
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:34 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:34 [backends.py:430] Dynamo bytecode transform time: 5.74 s
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:35 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:35 [backends.py:430] Dynamo bytecode transform time: 5.90 s
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:39 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.378 s
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:39 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.414 s
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:40 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.504 s
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:40 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.487 s
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:37:46 [monitor.py:33] torch.compile takes 5.62 s in total
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:37:46 [monitor.py:33] torch.compile takes 5.74 s in total
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:37:46 [monitor.py:33] torch.compile takes 5.63 s in total
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:37:46 [monitor.py:33] torch.compile takes 5.90 s in total
+INFO 06-27 03:37:47 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-27 03:37:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-27 03:37:47 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:37:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:37:47 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:37:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:37:47 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-27 03:37:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3459152) INFO 06-27 03:38:13 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3459149) INFO 06-27 03:38:13 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3459154) INFO 06-27 03:38:13 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3459150) INFO 06-27 03:38:13 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-27 03:38:13 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.52 seconds
+INFO 06-27 03:38:14 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 03:50:53 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 03:50:53 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5278|± |0.0280|
+| | |math_pass@1:1_samples|0.7443|± |0.0441|
+|mm\|arc_challenge\|0| 0|sem |0.6220|± |0.0249|
+|mm\|arc_easy\|0 | 0|sem |0.6325|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.5094|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7136|± |0.0214|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669|
+|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435|
+
diff --git a/merge_bench/logs/phi_linear_7.log b/merge_bench/logs/phi_linear_7.log
new file mode 100644
index 0000000000000000000000000000000000000000..4c3e1e07512ad48cdd696edeaa9bdaf22c8ed2ad
--- /dev/null
+++ b/merge_bench/logs/phi_linear_7.log
@@ -0,0 +1,96 @@
+INFO 06-27 03:50:52 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 03:50:54 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 03:51:01 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'generate', 'reward', 'classify'}. Defaulting to 'generate'.
+INFO 06-27 03:51:01 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 03:51:01 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 03:51:02 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 03:51:02 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 03:51:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_714b3dea'), local_subscribe_addr='ipc:///tmp/2f2fd7a1-d76f-40e9-9afb-0889889d0481', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:51:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_862c6351'), local_subscribe_addr='ipc:///tmp/aea04bd6-bf04-43a9-b63b-7e1d4d38328a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:51:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a03f8b8d'), local_subscribe_addr='ipc:///tmp/4b33314d-7b7a-4a0f-a474-f73a7288f7e5', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 03:51:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 03:51:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_334cdaef'), local_subscribe_addr='ipc:///tmp/09f5481c-e720-4801-a905-6174d455376a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f940b541'), local_subscribe_addr='ipc:///tmp/8c21ebc4-9e4a-4abe-9b85-0b5185ba2ca4', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:04 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:04 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:04 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:04 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:04 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:04 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:04 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:04 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3461659) WARNING 06-27 03:51:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3461660) WARNING 06-27 03:51:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3461658) WARNING 06-27 03:51:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3461657) WARNING 06-27 03:51:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_61595aee'), local_subscribe_addr='ipc:///tmp/cf3c5a85-6794-4c17-b25b-e36130b1eef1', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:05 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:05 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:05 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:05 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3461660) WARNING 06-27 03:51:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3461659) WARNING 06-27 03:51:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:05 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3461657) WARNING 06-27 03:51:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3461658) WARNING 06-27 03:51:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:06 [loader.py:458] Loading weights took 0.67 seconds
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:06 [loader.py:458] Loading weights took 0.66 seconds
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:06 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:06 [loader.py:458] Loading weights took 0.71 seconds
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.882061 seconds
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.863736 seconds
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.935206 seconds
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.874226 seconds
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:12 [backends.py:430] Dynamo bytecode transform time: 5.56 s
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:12 [backends.py:430] Dynamo bytecode transform time: 5.64 s
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:12 [backends.py:430] Dynamo bytecode transform time: 5.68 s
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:12 [backends.py:430] Dynamo bytecode transform time: 5.71 s
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.375 s
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.451 s
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.414 s
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.425 s
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:23 [monitor.py:33] torch.compile takes 5.56 s in total
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:23 [monitor.py:33] torch.compile takes 5.64 s in total
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:23 [monitor.py:33] torch.compile takes 5.68 s in total
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:23 [monitor.py:33] torch.compile takes 5.71 s in total
+INFO 06-27 03:51:24 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-27 03:51:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-27 03:51:24 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:51:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:51:24 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 03:51:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 03:51:24 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-27 03:51:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=3 pid=3461660) INFO 06-27 03:51:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3461659) INFO 06-27 03:51:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3461658) INFO 06-27 03:51:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3461657) INFO 06-27 03:51:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-27 03:51:50 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.27 seconds
+INFO 06-27 03:51:51 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 04:04:33 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 04:04:33 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5260|± |0.0279|
+| | |math_pass@1:1_samples|0.7533|± |0.0439|
+|mm\|arc_challenge\|0| 0|sem |0.6220|± |0.0249|
+|mm\|arc_easy\|0 | 0|sem |0.6357|± |0.0156|
+|mm\|commonsenseqa\|0| 0|sem |0.5156|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7315|± |0.0210|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669|
+|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429|
+
diff --git a/merge_bench/logs/phi_linear_8.log b/merge_bench/logs/phi_linear_8.log
new file mode 100644
index 0000000000000000000000000000000000000000..3ddcc3291b2cd71aa1883792d9d7666cb45090b1
--- /dev/null
+++ b/merge_bench/logs/phi_linear_8.log
@@ -0,0 +1,96 @@
+INFO 06-27 04:04:32 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 04:04:34 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 04:04:41 [config.py:717] This model supports multiple tasks: {'generate', 'score', 'embed', 'reward', 'classify'}. Defaulting to 'generate'.
+INFO 06-27 04:04:41 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 04:04:41 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 04:04:43 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-27 04:04:43 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-27 04:04:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_99c4f216'), local_subscribe_addr='ipc:///tmp/f947237b-7f6b-4094-8434-f89ab25aa6b2', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 04:04:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-27 04:04:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b587ff9a'), local_subscribe_addr='ipc:///tmp/6fbaae7a-578f-44bf-b908-cc00996cd192', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c0f62c48'), local_subscribe_addr='ipc:///tmp/7eb0d6c0-c7f6-4df3-95f8-a9553f4454b3', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 04:04:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7482f04a'), local_subscribe_addr='ipc:///tmp/7a18c3c1-b1e2-445b-88a8-cd69ddcee828', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-27 04:04:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_90cc6f29'), local_subscribe_addr='ipc:///tmp/24b5aa5a-c1df-4f7e-bf59-5856daa4fa6b', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3463534) WARNING 06-27 04:04:45 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3463535) WARNING 06-27 04:04:45 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3463536) WARNING 06-27 04:04:45 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3463537) WARNING 06-27 04:04:45 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:45 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_6ce5769e'), local_subscribe_addr='ipc:///tmp/54247821-49ed-48c0-a116-4078da2aaa21', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:45 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:45 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:45 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:45 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:45 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:45 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3463534) WARNING 06-27 04:04:45 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:45 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3463535) WARNING 06-27 04:04:45 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:45 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3463536) WARNING 06-27 04:04:45 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3463537) WARNING 06-27 04:04:45 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:45 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:45 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:45 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:45 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:46 [loader.py:458] Loading weights took 0.66 seconds
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:46 [loader.py:458] Loading weights took 0.65 seconds
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:46 [loader.py:458] Loading weights took 0.73 seconds
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:46 [loader.py:458] Loading weights took 0.76 seconds
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.856983 seconds
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.856359 seconds
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.928136 seconds
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.963085 seconds
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:52 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:52 [backends.py:430] Dynamo bytecode transform time: 5.60 s
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:53 [backends.py:430] Dynamo bytecode transform time: 5.66 s
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:53 [backends.py:430] Dynamo bytecode transform time: 5.72 s
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:53 [backends.py:430] Dynamo bytecode transform time: 5.74 s
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:04:57 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.346 s
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:04:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.373 s
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:04:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.446 s
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:04:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.457 s
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:05:03 [monitor.py:33] torch.compile takes 5.60 s in total
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:05:03 [monitor.py:33] torch.compile takes 5.72 s in total
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:05:03 [monitor.py:33] torch.compile takes 5.66 s in total
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:05:03 [monitor.py:33] torch.compile takes 5.74 s in total
+INFO 06-27 04:05:05 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-27 04:05:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-27 04:05:05 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 04:05:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 04:05:05 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 04:05:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-27 04:05:05 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-27 04:05:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=3 pid=3463537) INFO 06-27 04:05:29 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3463535) INFO 06-27 04:05:29 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3463534) INFO 06-27 04:05:29 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3463536) INFO 06-27 04:05:29 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+INFO 06-27 04:05:29 [core.py:159] init engine (profile, create kv cache, warmup model) took 42.49 seconds
+INFO 06-27 04:05:30 [core_client.py:439] Core engine process 0 ready.
+INFO 06-27 04:18:11 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-27 04:18:11 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5092|± |0.0280|
+| | |math_pass@1:1_samples|0.7633|± |0.0437|
+|mm\|arc_challenge\|0| 0|sem |0.5748|± |0.0254|
+|mm\|arc_easy\|0 | 0|sem |0.6283|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.5031|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7517|± |0.0205|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669|
+|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429|
+
diff --git a/merge_bench/logs/phi_linear_9.log b/merge_bench/logs/phi_linear_9.log
new file mode 100644
index 0000000000000000000000000000000000000000..4d97b6349b0dfd6bbe6a63b5e84db160172701a3
--- /dev/null
+++ b/merge_bench/logs/phi_linear_9.log
@@ -0,0 +1,96 @@
+INFO 06-27 04:18:10 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-27 04:18:12 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-27 04:18:19 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'classify', 'generate', 'reward'}. Defaulting to 'generate'.
+INFO 06-27 04:18:19 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-27 04:18:19 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-27 04:18:20 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-27 04:18:20 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-27 04:18:20 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_38b9ed00'), local_subscribe_addr='ipc:///tmp/927bf6ed-7718-4102-810c-743be7f346f7', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-27 04:18:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_88552a69'), local_subscribe_addr='ipc:///tmp/d6f5d2c8-7c9b-46ca-a769-431b1606bc1e', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-27 04:18:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a1cdac22'), local_subscribe_addr='ipc:///tmp/1004e10b-1501-42e5-8d81-9e7ba71d12c2', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-27 04:18:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-27 04:18:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2de4b4e3'), local_subscribe_addr='ipc:///tmp/7ceea1d7-a038-4bab-815e-73854524454e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c8ff2cd5'), local_subscribe_addr='ipc:///tmp/b866d7f8-e437-4e63-b251-1bdee58d9c83', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:22 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:22 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:22 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:22 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:22 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:22 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:22 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:22 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3465349) WARNING 06-27 04:18:23 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3465350) WARNING 06-27 04:18:23 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3465347) WARNING 06-27 04:18:23 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3465348) WARNING 06-27 04:18:23 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_a1278e9a'), local_subscribe_addr='ipc:///tmp/d57b6989-89f2-4332-a268-20cdd1112732', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:23 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:23 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:23 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:23 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3465350) WARNING 06-27 04:18:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3465349) WARNING 06-27 04:18:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3465347) WARNING 06-27 04:18:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3465348) WARNING 06-27 04:18:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:24 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:24 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:24 [loader.py:458] Loading weights took 0.66 seconds +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:24 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:24 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.875188 seconds +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:24 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.871170 seconds +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:24 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.937340 seconds +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:24 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.886571 seconds +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:30 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:30 [backends.py:430] Dynamo bytecode transform time: 5.63 s +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:30 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:30 [backends.py:430] Dynamo bytecode transform time: 5.64 s +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:30 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:30 [backends.py:430] Dynamo bytecode transform time: 5.71 s +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:30 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:30 [backends.py:430] Dynamo bytecode transform time: 5.84 s +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:35 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.371 s +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:35 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.381 s +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:35 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.384 s +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:35 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.415 s +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:18:41 [monitor.py:33] torch.compile takes 5.71 s in total +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:18:41 [monitor.py:33] torch.compile takes 5.63 s in total +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:18:41 [monitor.py:33] torch.compile takes 5.84 s in total +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:18:41 [monitor.py:33] torch.compile takes 5.64 s in total +INFO 06-27 04:18:42 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-27 04:18:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-27 04:18:42 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-27 04:18:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-27 04:18:42 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-27 04:18:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-27 04:18:42 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-27 04:18:42 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3465350) INFO 06-27 04:19:08 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3465349) INFO 06-27 04:19:08 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3465348) INFO 06-27 04:19:08 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3465347) INFO 06-27 04:19:08 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-27 04:19:08 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.42 seconds +INFO 06-27 04:19:08 [core_client.py:439] Core engine process 0 ready. +INFO 06-27 04:31:48 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-27 04:31:48 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5194|± |0.0279| +| | |math_pass@1:1_samples|0.8031|± |0.0388| +|mm\|arc_challenge\|0| 0|sem |0.5906|± |0.0252| +|mm\|arc_easy\|0 | 0|sem |0.6272|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5375|± |0.0279| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7562|± |0.0203| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572| +|mm\|truthfulqa\|0 | 0|sem |0.3223|± |0.0427| + diff --git a/merge_bench/logs/phi_ties_1.log b/merge_bench/logs/phi_ties_1.log new file mode 100644 index 0000000000000000000000000000000000000000..6f862cdd34a65773241c72e064ed43fbbb0a2d68 --- /dev/null +++ b/merge_bench/logs/phi_ties_1.log @@ -0,0 +1,96 @@ +INFO 06-28 00:04:19 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 00:04:20 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 00:04:28 [config.py:717] This model supports multiple tasks: {'reward', 'generate', 'score', 'classify', 'embed'}. Defaulting to 'generate'. +INFO 06-28 00:04:28 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 00:04:28 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 00:04:30 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 00:04:30 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 00:04:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_2abe6abe'), local_subscribe_addr='ipc:///tmp/457416df-872d-4317-b106-2134e675d0da', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 00:04:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 00:04:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_685504c9'), local_subscribe_addr='ipc:///tmp/2a5ea551-f9c4-451f-adec-163349ef193a', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 00:04:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 00:04:30 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ee5b58f4'), local_subscribe_addr='ipc:///tmp/dcfa2e9f-cb07-43e4-89da-be93880cfb53', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d60d37fc'), local_subscribe_addr='ipc:///tmp/27722dbf-c5aa-42c3-a7aa-0f0e9d7aa849', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_120c3c8a'), local_subscribe_addr='ipc:///tmp/e38be149-f63e-4ee1-b9e2-7857fd7453f4', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:37 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:37 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:37 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:37 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:37 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:37 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:37 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:37 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3498583) WARNING 06-28 00:04:38 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3498581) WARNING 06-28 00:04:38 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3498580) WARNING 06-28 00:04:38 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3498582) WARNING 06-28 00:04:38 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:38 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_99e5fc0d'), local_subscribe_addr='ipc:///tmp/46fa865f-a132-40a8-8b2d-8aa796c96a28', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:38 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:38 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:38 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:38 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:38 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:38 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:38 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:38 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3498580) WARNING 06-28 00:04:38 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3498581) WARNING 06-28 00:04:38 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3498583) WARNING 06-28 00:04:38 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3498582) WARNING 06-28 00:04:38 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:38 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:38 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:38 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:38 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:39 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:39 [loader.py:458] Loading weights took 0.76 seconds +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:39 [loader.py:458] Loading weights took 0.76 seconds +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:39 [loader.py:458] Loading weights took 0.77 seconds +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.962431 seconds +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.961445 seconds +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.955792 seconds +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:39 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.009121 seconds +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:45 [backends.py:430] Dynamo bytecode transform time: 6.12 s +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:45 [backends.py:430] Dynamo bytecode transform time: 6.12 s +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:45 [backends.py:430] Dynamo bytecode transform time: 6.12 s +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:45 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:45 [backends.py:430] Dynamo bytecode transform time: 6.12 s +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:51 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.659 s +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:51 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.731 s +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:51 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.703 s +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:51 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.765 s +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:04:56 [monitor.py:33] torch.compile takes 6.12 s in total +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:04:56 [monitor.py:33] torch.compile takes 6.12 s in total +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:04:56 [monitor.py:33] torch.compile takes 6.12 s in total +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:04:56 [monitor.py:33] torch.compile takes 6.12 s in total +INFO 06-28 00:04:58 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 00:04:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 00:04:58 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 00:04:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 00:04:58 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 00:04:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 00:04:58 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 00:04:58 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3498582) INFO 06-28 00:05:23 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3498580) INFO 06-28 00:05:23 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3498581) INFO 06-28 00:05:23 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3498583) INFO 06-28 00:05:23 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +INFO 06-28 00:05:23 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.71 seconds +INFO 06-28 00:05:23 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 00:18:11 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 00:18:12 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5163|± |0.0281| +| | |math_pass@1:1_samples|0.7770|± |0.0422| +|mm\|arc_challenge\|0| 0|sem |0.6010|± |0.0251| +|mm\|arc_easy\|0 | 0|sem |0.6325|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.4844|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641| +|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435| + diff --git a/merge_bench/logs/phi_ties_3.log b/merge_bench/logs/phi_ties_3.log new file mode 100644 index 0000000000000000000000000000000000000000..abf156a9a0b8f804074e2e3dd98106084d94078c --- /dev/null +++ b/merge_bench/logs/phi_ties_3.log @@ -0,0 +1,96 @@ +INFO 06-28 00:18:11 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 00:18:12 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 00:18:20 [config.py:717] This model supports multiple tasks: {'reward', 'score', 'generate', 'classify', 'embed'}. Defaulting to 'generate'. +INFO 06-28 00:18:20 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 00:18:20 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 00:18:21 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 00:18:21 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 00:18:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_64a3cf43'), local_subscribe_addr='ipc:///tmp/48692c60-28a5-46d0-84af-9a1b5e3fde25', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 00:18:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 00:18:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 00:18:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 00:18:22 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3b32a873'), local_subscribe_addr='ipc:///tmp/47b03dd2-7cda-4a81-b056-a71be7086532', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9031601a'), local_subscribe_addr='ipc:///tmp/4f5a3729-d58c-4c0c-9aff-59cfa0cf970b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0333bbf3'), local_subscribe_addr='ipc:///tmp/f8ecc6d9-14fe-4bb9-9f76-449d44779b1b', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d7d0086a'), local_subscribe_addr='ipc:///tmp/6342ef1b-0bd2-437e-a6a8-31352458218f', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3502091) WARNING 06-28 00:18:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3502090) WARNING 06-28 00:18:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=2 pid=3502092) WARNING 06-28 00:18:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3502093) WARNING 06-28 00:18:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_d8c777a2'), local_subscribe_addr='ipc:///tmp/36440de9-abb4-4834-bdcd-4771f8e0a2eb', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:35 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:35 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:35 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:35 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:35 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:35 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3502091) WARNING 06-28 00:18:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3502090) WARNING 06-28 00:18:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:35 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:35 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3502093) WARNING 06-28 00:18:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3502092) WARNING 06-28 00:18:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:36 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:36 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:36 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:36 [loader.py:458] Loading weights took 0.80 seconds +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.876938 seconds +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.883893 seconds +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.973901 seconds +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.031346 seconds +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:42 [backends.py:430] Dynamo bytecode transform time: 6.00 s +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:42 [backends.py:430] Dynamo bytecode transform time: 6.00 s +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:42 [backends.py:430] Dynamo bytecode transform time: 6.14 s +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:42 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:42 [backends.py:430] Dynamo bytecode transform time: 6.19 s +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.704 s +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.755 s +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 5.082 s +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 5.240 s +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:18:54 [monitor.py:33] torch.compile takes 6.00 s in total +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:18:54 [monitor.py:33] torch.compile takes 6.00 s in total +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:18:54 [monitor.py:33] torch.compile takes 6.19 s in total +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:18:54 [monitor.py:33] torch.compile takes 6.14 s in total +INFO 06-28 00:18:55 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 00:18:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 00:18:55 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 00:18:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 00:18:55 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 00:18:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 00:18:55 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 00:18:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=0 pid=3502090) INFO 06-28 00:19:22 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3502091) INFO 06-28 00:19:22 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3502093) INFO 06-28 00:19:23 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3502092) INFO 06-28 00:19:23 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +INFO 06-28 00:19:23 [core.py:159] init engine (profile, create kv cache, warmup model) took 46.42 seconds +INFO 06-28 00:19:23 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 00:31:57 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 00:31:57 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5150|± |0.0277| +| | |math_pass@1:1_samples|0.7385|± |0.0452| +|mm\|arc_challenge\|0| 0|sem |0.6220|± |0.0249| +|mm\|arc_easy\|0 | 0|sem |0.6241|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5000|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7271|± |0.0211| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7500|± |0.0693| +|mm\|truthfulqa\|0 | 0|sem |0.3140|± |0.0424| + diff --git a/merge_bench/logs/phi_ties_5.log b/merge_bench/logs/phi_ties_5.log new file mode 100644 index 0000000000000000000000000000000000000000..f757a3614e4f43660f977dc53d36cfd1e01f893e --- /dev/null +++ b/merge_bench/logs/phi_ties_5.log @@ -0,0 +1,96 @@ +INFO 06-28 02:29:51 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 02:29:53 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 02:30:00 [config.py:717] This model supports multiple tasks: {'generate', 'score', 'embed', 'reward', 'classify'}. Defaulting to 'generate'. +INFO 06-28 02:30:00 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 02:30:00 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 02:30:01 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 02:30:01 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 02:30:01 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_604e8e56'), local_subscribe_addr='ipc:///tmp/6a5d04f5-6450-485a-b7da-b2fb9b33f0d9', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:30:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_8e0bd40b'), local_subscribe_addr='ipc:///tmp/14b598b5-accc-4bf5-a43a-15a15c4b1a6f', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:30:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_937ccfd1'), local_subscribe_addr='ipc:///tmp/e1d4207d-bd2c-4669-bdb5-d2808557c43c', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:30:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 02:30:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_850fb964'), local_subscribe_addr='ipc:///tmp/dfdeb6ab-4cc1-4a1c-b3d0-6a304eeeaadd', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9434e564'), local_subscribe_addr='ipc:///tmp/789ee0fa-631c-4e35-a794-701f51a258f1', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3525089) WARNING 06-28 02:30:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3525090) WARNING 06-28 02:30:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3525088) WARNING 06-28 02:30:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3525087) WARNING 06-28 02:30:04 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_8a453fcb'), local_subscribe_addr='ipc:///tmp/3b68802e-dd95-43b0-a83e-ed9aadda8540', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:04 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:04 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:04 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:04 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3525089) WARNING 06-28 02:30:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3525090) WARNING 06-28 02:30:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:04 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3525088) WARNING 06-28 02:30:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3525087) WARNING 06-28 02:30:04 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:04 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:04 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:04 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:04 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:05 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:05 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:05 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:05 [loader.py:458] Loading weights took 0.74 seconds +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:05 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.918278 seconds +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.922065 seconds +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.895514 seconds +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.973592 seconds +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:11 [backends.py:430] Dynamo bytecode transform time: 5.65 s +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:11 [backends.py:430] Dynamo bytecode transform time: 5.73 s +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:11 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:11 [backends.py:430] Dynamo bytecode transform time: 5.81 s +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:11 [backends.py:430] Dynamo bytecode transform time: 5.81 s +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:16 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.407 s +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.434 s +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.482 s +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.454 s +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:22 [monitor.py:33] torch.compile takes 5.81 s in total +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:22 [monitor.py:33] torch.compile takes 5.81 s in total +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:22 [monitor.py:33] torch.compile takes 5.65 s in total +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:22 [monitor.py:33] torch.compile takes 5.73 s in total +INFO 06-28 02:30:24 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 02:30:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 02:30:24 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 02:30:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:30:24 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-28 02:30:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:30:24 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 02:30:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3525090) INFO 06-28 02:30:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3525087) INFO 06-28 02:30:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3525089) INFO 06-28 02:30:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3525088) INFO 06-28 02:30:50 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 02:30:50 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.44 seconds +INFO 06-28 02:30:50 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 02:43:29 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 02:43:29 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5091|± |0.0279| +| | |math_pass@1:1_samples|0.7928|± |0.0405| +|mm\|arc_challenge\|0| 0|sem |0.5827|± |0.0253| +|mm\|arc_easy\|0 | 0|sem |0.6410|± |0.0156| +|mm\|commonsenseqa\|0| 0|sem |0.4906|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7606|± |0.0202| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.3223|± |0.0427| + diff --git a/merge_bench/logs/phi_ties_7.log b/merge_bench/logs/phi_ties_7.log new file mode 100644 index 0000000000000000000000000000000000000000..b92d594acb5f4af8a289c18b0432d386294144d0 --- /dev/null +++ b/merge_bench/logs/phi_ties_7.log @@ -0,0 +1,96 @@ +INFO 06-28 02:43:28 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 02:43:30 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 02:43:37 [config.py:717] This model supports multiple tasks: {'classify', 'generate', 'reward', 'embed', 'score'}. Defaulting to 'generate'. +INFO 06-28 02:43:37 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 02:43:37 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-28 02:43:39 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 02:43:39 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 02:43:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_901a0879'), local_subscribe_addr='ipc:///tmp/087980f4-6715-439e-ade5-e490bb2ff57e', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:43:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a12311a9'), local_subscribe_addr='ipc:///tmp/25170eb0-7a2f-4e8d-ad5a-bf695aad19fd', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:43:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 02:43:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e733a3eb'), local_subscribe_addr='ipc:///tmp/5e2ea1ec-c5f6-4d6f-90a6-4c58def244aa', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:43:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_65524c3c'), local_subscribe_addr='ipc:///tmp/66934d46-2cd6-48fd-861e-d91c8468b582', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_96394c91'), local_subscribe_addr='ipc:///tmp/ef2160b0-e13f-4fbf-bc5a-3a12e6da0e19', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:41 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:41 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:41 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:41 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:41 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:41 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:41 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:41 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3527443) WARNING 06-28 02:43:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3527442) WARNING 06-28 02:43:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3527441) WARNING 06-28 02:43:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3527440) WARNING 06-28 02:43:41 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_91ddd545'), local_subscribe_addr='ipc:///tmp/79bc8596-ad99-4743-9101-657250aa290c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:41 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:41 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:41 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:41 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3527442) WARNING 06-28 02:43:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:41 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:41 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:41 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:41 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3527443) WARNING 06-28 02:43:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3527440) WARNING 06-28 02:43:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3527441) WARNING 06-28 02:43:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:41 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:41 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:41 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:41 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:42 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:42 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:42 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:42 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:42 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.878173 seconds +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:43 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.875533 seconds +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:43 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.944981 seconds +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:43 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.922516 seconds +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:48 [backends.py:430] Dynamo bytecode transform time: 5.56 s +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:49 [backends.py:430] Dynamo bytecode transform time: 5.58 s +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:49 [backends.py:430] Dynamo bytecode transform time: 5.78 s +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:49 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:49 [backends.py:430] Dynamo bytecode transform time: 5.86 s +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:43:53 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.346 s +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:43:54 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.360 s +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:43:54 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.368 s +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:43:54 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.479 s +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:44:00 [monitor.py:33] torch.compile takes 5.56 s in total +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:44:00 [monitor.py:33] torch.compile takes 5.78 s in total +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:44:00 [monitor.py:33] torch.compile takes 5.86 s in total +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:44:00 [monitor.py:33] torch.compile takes 5.58 s in total +INFO 06-28 02:44:01 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 02:44:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 02:44:01 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 02:44:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:44:01 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 02:44:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:44:01 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 02:44:01 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3527442) INFO 06-28 02:44:27 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3527440) INFO 06-28 02:44:27 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3527443) INFO 06-28 02:44:27 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3527441) INFO 06-28 02:44:27 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 02:44:27 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.92 seconds +INFO 06-28 02:44:27 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 02:57:13 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 02:57:13 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5215|± |0.0275| +| | |math_pass@1:1_samples|0.7805|± |0.0409| +|mm\|arc_challenge\|0| 0|sem |0.6352|± |0.0247| +|mm\|arc_easy\|0 | 0|sem |0.6410|± |0.0156| +|mm\|commonsenseqa\|0| 0|sem |0.5125|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7360|± |0.0209| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.2975|± |0.0417| + diff --git a/merge_bench/logs/phi_ties_9.log b/merge_bench/logs/phi_ties_9.log new file mode 100644 index 0000000000000000000000000000000000000000..2b64fda2c6825a3ad2a26640522c5d969715e655 --- /dev/null +++ b/merge_bench/logs/phi_ties_9.log @@ -0,0 +1,96 @@ +INFO 06-28 02:57:12 [__init__.py:239] Automatically detected platform cuda. +INFO 06-28 02:57:14 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-28 02:57:21 [config.py:717] This model supports multiple tasks: {'reward', 'embed', 'generate', 'classify', 'score'}. Defaulting to 'generate'. +INFO 06-28 02:57:21 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-28 02:57:21 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-28 02:57:22 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-28 02:57:22 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-28 02:57:22 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e5197895'), local_subscribe_addr='ipc:///tmp/b0a44ef9-bda3-4b70-a828-3d8153cb25e8', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:57:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_66e46478'), local_subscribe_addr='ipc:///tmp/3efa8e16-e41b-4f7c-94f9-95ea91664c1f', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:57:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b98a5dc9'), local_subscribe_addr='ipc:///tmp/2870b470-eb7a-4d36-bd20-881fd3cf2c8c', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-28 02:57:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-28 02:57:23 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_263260cf'), local_subscribe_addr='ipc:///tmp/833452ad-0120-435e-b553-3cb039a6a2c9', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_e741f63c'), local_subscribe_addr='ipc:///tmp/6de270e3-27d7-4282-8278-117949320a93', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:24 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:24 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:24 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:24 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:24 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:24 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:24 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:24 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3529696) WARNING 06-28 02:57:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3529695) WARNING 06-28 02:57:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3529694) WARNING 06-28 02:57:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3529693) WARNING 06-28 02:57:25 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:25 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_aafaa12e'), local_subscribe_addr='ipc:///tmp/67dc1f4e-7410-4b14-99a6-9177125c8985', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:25 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:25 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:25 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:25 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3529695) WARNING 06-28 02:57:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3529696) WARNING 06-28 02:57:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:25 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3529693) WARNING 06-28 02:57:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3529694) WARNING 06-28 02:57:25 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:25 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:25 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:25 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:25 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:26 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:26 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:26 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:26 [loader.py:458] Loading weights took 0.72 seconds +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:26 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.878988 seconds +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:26 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.886175 seconds +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:26 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.919960 seconds +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:26 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.959284 seconds +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:32 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:32 [backends.py:430] Dynamo bytecode transform time: 5.59 s +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:32 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:32 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:32 [backends.py:430] Dynamo bytecode transform time: 5.76 s +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:32 [backends.py:430] Dynamo bytecode transform time: 5.76 s +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:32 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:32 [backends.py:430] Dynamo bytecode transform time: 5.80 s +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:37 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.381 s +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:37 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.387 s +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:37 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.395 s +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:37 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.436 s +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:57:43 [monitor.py:33] torch.compile takes 5.76 s in total +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:57:43 [monitor.py:33] torch.compile takes 5.59 s in total +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:57:43 [monitor.py:33] torch.compile takes 5.76 s in total +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:57:43 [monitor.py:33] torch.compile takes 5.80 s in total +INFO 06-28 02:57:44 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-28 02:57:44 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-28 02:57:44 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-28 02:57:44 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:57:44 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-28 02:57:44 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-28 02:57:44 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-28 02:57:44 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3529696) INFO 06-28 02:58:10 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3529695) INFO 06-28 02:58:10 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3529694) INFO 06-28 02:58:10 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3529693) INFO 06-28 02:58:11 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-28 02:58:11 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.11 seconds +INFO 06-28 02:58:11 [core_client.py:439] Core engine process 0 ready. +INFO 06-28 03:10:49 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-28 03:10:49 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5092|± |0.0276| +| | |math_pass@1:1_samples|0.8303|± |0.0341| +|mm\|arc_challenge\|0| 0|sem |0.6352|± |0.0247| +|mm\|arc_easy\|0 | 0|sem |0.6177|± |0.0158| +|mm\|commonsenseqa\|0| 0|sem |0.4781|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7606|± |0.0202| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9000|± |0.0480| +|mm\|truthfulqa\|0 | 0|sem |0.3058|± |0.0421| + diff --git a/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a4b0f69dca2e77fed7d291d2b4aaa15198c9476d --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce3917ead928e2c0b135b0486913789ebce9bf887ab523b118e4dbf44b3d98fb +size 3529231 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a8a9552b255b4442f393428393b58c00dd6c9561 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66410b03c3e808718cb27f85ed11686da011eb81f1b30ce185527d6cb428735e +size 8156073 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9fe8dcb4f08acc1c2459a8dbe22ddfbc91d8f88f --- /dev/null +++ 
b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7377a29f21f91b2bfaf75bbf93a1b2f128fa753f1cefc634829bcb1583732bbd +size 2862028 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..131554f5a972274b764fc80640e56ad6e65a8db5 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbf37c3148192aa67066977a11a4870049c30d3f71317e44e3e1c65c4cc03dcf +size 3039563 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..dfdcfc57167d0d6d6202f506dc61e1195493c8be --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b84257797190eecdbc0ef3e6cb8d4b4e1140e78fbe200889ef405bcc876f98e1 +size 316667 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..84e931ec7118fec5a02797ec049ac06e71f513a3 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:613a394111e16909114efc1a671aa7916329532d45f0eb62d35997e3219189a9 +size 1148077 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c5a3144b7d7e95bd2ce2aa8220b8e48e2ed86cea --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66e45619e88ade20bf58ac95773dd63dff38803210a4384bab83d8b889231794 +size 3518921 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54e02c7a0bc05601b4d457d5645545298188f931 --- /dev/null +++ 
b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d59b8f7c561bc7d2e58d99395c5b50d9d7f0b8c7c590003c1f92a8db017111 +size 8166781 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bc6fb456374f05114e6f04087c945aaafa71be8f --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a67f6187601b7140b54906bb296894958bc31d84dbcf18c4923a62d11105c33e +size 2859764 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6724c891a9b538d7d62452855df5eb8e36bb1080 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:081b137da3e9752b00de835a085365af6036fb0b27cfafcc2f495f3fbdad462a +size 3039375 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e7b5c208805103e88f3b58857a1ab979d80cad35 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:589a4bb3ac1b5db19871cb67d76a9a3d636887d0ab07c1ceefb7cee55fab7ced +size 317874 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3056b1b6ccd94ba5b1da508ad3903d5bcad531c2 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4937003c6389d8dddc60b3dcefcfeb410aae72eb792fac7116d4a94913d2230f +size 1146204 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c3e50b504bf0ae4bb8c7ab735beb4e4f25c6fd9e --- /dev/null +++ 
b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b66af07142cf41943266bfde37ff44bc060dccd2e6b9b05f85db2e7c4884e33f +size 3524830 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..52bfb65ad87016facceaa810baf240517e53816e --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb24ecc922da45dc457507d7d08ac984d0b5c371bba77756a5e951bda588ee0c +size 8167635 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5ccf99cf30888378c04cd6864593fcb0036ee997 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2be5f6739cba12997dc33b547ae73dae4b91e66747ba2198768b883683cea7d +size 2860274 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e51d122cf03f482e5cc0d9cf0249e38e9dd7734e --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bca27b2a64be6e21925a252fe612f0735ff5c66e446d021b0304e2e869eef67 +size 3041451 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7fbe009073bd802bb239b79ebe10912a73ddaa69 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d03877a90803995a1fbde66b9794b79d2cf4cd988237cbc3433fda4a6ef13a2 +size 316015 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..56c43798f773b9d8527300c8149988e53c6f5b50 --- /dev/null +++ 
b/merge_bench/outputs/._merged1_llama_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e154c36019fe5a6988dac4c3b9e431498250ecedb14dd0c943084a4a30d02a5 +size 1160729 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a7f370c58870c1edb441e9e5ffa5e8351d9e11d6 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c76cb30736ade86709aa04ed47b9e751827496920a2d5ac56ab5fe5946f9abfa +size 3525826 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5a69f0374efe0ca23695405f475a702cd2b4e57e --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f69b72ef231253ae98abac3bf1773b88a120796ee43045e2e09779f393925126 +size 8160872 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..128985eec54b1615e0865a21ad3eea23a2949b60 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44986ca28e67ec78c6a9a018bff99cab2c960ad43481cf2c41afdc94570b2c36 +size 2859380 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..95d5001aece4a1804513ae745329210b0d109bf0 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24bfe43d3bcdb60022e33313762e6192c16b45bd77d27eb0144c7f757171e028 +size 3037251 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e266f1b1cbff091a8f074cb5c6bd4e485db2835a --- /dev/null +++ 
b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5beecb57a6db31e1b48627e423d496113472d7219f2de9a9baee0db87f9de707 +size 317618 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b858b17fd7c1493ff55dd1e1b31d0ce78cfa84b5 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:536d6542711b3f391401857382ead851e4f74b6a8ef320fb2a638c1a0853b6c2 +size 1157572 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9f4ea4dffca46b0e8b8e5e449a184943faa0889a --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90f6ae572517fcefc31756866b8f09ddc9ab46d6fc999f100b1652cd8591cda2 +size 3525424 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..79b4434a528ee2138eb1bb319275b94931e7e7f0 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:649829f0fbba944c41f92eec62a18984154f3a2ef68c2e7f6f4774da41eb9ade +size 8162160 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c8f1d90cadde7cb098fab32faa68444362089f41 --- /dev/null +++ b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:142c54d6ce2f00b9ff6147042e0f5c46216a7143eaaca97f7fb912b6250761d2 +size 2869729 diff --git a/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6d7446c2c1a80d953d716f857342e4fd090721ba --- /dev/null +++ 
b/merge_bench/outputs/._merged1_llama_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cfb7f539692f9676f4aa767e6399b8968e297b487453d35451a5d1333bffdc0 +size 3036968
diff --git a/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..53664a6089fbb1ce80192ae27de5c561ed8306c4 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15932714749af24756125adedf51106a97cad0d3e520c6c954f7495fb8049805 +size 2864722 diff --git a/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a7116e2259d53bbe6acf96b8b5196db98ef1595c --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14b9556230ad3571ca2cb167f944f70cf756ffc1d4d72a592f30c0d41ccfe75b +size 3040967 diff --git a/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..88b48530dd255c8e4e0b306f3c26d0792d5cef1e --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25ad1057e96576d005fda7befc9512e11022d14f1c80c59ecf50253f63711e87 +size 316695 diff --git a/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bd6ff97a373ca5bde2b7cde9ab52bf7357a2fd1f --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_2/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:745bb0fd0701702bd510dae17af9568148a7a06689d1e4c24d5048bdfcfa75fd +size 1147075 diff --git a/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..02098c515bab539aed4ca08a0a90073c5688596b --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34d5e55a23995dbfdc0c2a7720be78411009bd596dd777c07f76dad690dbef80 +size 3520547 diff --git a/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1a2d008ad038e464ca62395ffcf38db35240ef9f --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:704cec304d2d45b882c54f4c779965ac795030900e94069ecd0eddb98ed25c4b +size 8148418 diff --git 
a/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f5380356b6a30ac6f97e58ae32f746e4908dcd93 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a81fc26308c62776a05e8ad29342f6a378212e0399486ebef2f7137e31ce01d +size 2869357 diff --git a/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f9570ae1166961cbd731c17e391af25c315416b2 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eafe476e117d5da2846440e90b132c5506758232d078c737a8a6863772e45ef6 +size 3038644 diff --git a/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..408113a80e3f19022209afc6e894cc24f42310f9 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:767c6cdb78126ceae8f21313d608b5181fd5a19ff7fa1f54606f809c8a8f1a41 +size 317695 diff --git a/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9c562890596b86f122f7c8cd66b66ea386206927 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d3d10fe33406c03c4e2c96c291d9b35963818657f29f92a96bd0c9cb668792e +size 1143384 diff --git a/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..465cc17990f200b3a5e647921776130747dbe595 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ada94979059105fc95a32fe76b5db26d2855756dfa3c69d96ab501c31236a384 +size 3525338 diff --git a/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet 
b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..30968a6110a0b989fea1ac08cc069fa4b842e3bd --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c936369ed68cde110ace70a051676910707d57e0e09b5ea444c0dd1ed2ffe0d3 +size 8148869 diff --git a/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4a3931a4cf864bc7d804b0b8b222868fc1a43b61 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30503f56e4fdedb68ebdfb330936badf679b3b296a6800372e4b9e873e73579 +size 2856905 diff --git a/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ad26d8474edad9a5f09242878ef21ef0cf2c790c --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51078a310c35330170b097dedbfddb5f13ebc2069b74d835e9e53dfddf5fdd55 +size 3040859 diff --git a/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2200c52d64b5976b00867c68b1fc9086e756ab64 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05a898661b1f7d93dcef51d8c1bb369787074278ca59d269d01d263851ec3ef7 +size 316569 diff --git a/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..56b05aff6cae1d47ad8c394fdb80807f74f4e046 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa374447c0e17ca58271f4ef27bee5ba75bc7c44a88cbec8ccbc32aa3645bca4 +size 1140662 diff --git a/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 
100644 index 0000000000000000000000000000000000000000..928c9c7be8a2723070aaeee4dabdff0ea1969bdf --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc8b7c92b7cca7630899f907614de64984bec55bd240e44472d337712e5db807 +size 3528492 diff --git a/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2abdae525a785ce43aff4123aa558eb86d16beb9 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5a9abb8bde1f55cc606632321ed91b54b131cb51200730f78b7b88f5cde36a7 +size 8151316 diff --git a/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..73e6c6bd7f68a3a4c9b5bb7b7de545d2c26622cb --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ac42b1f5d23f009949eabdfd56049b92720eb5a2b92cd1d869923caf37b27be +size 2861094 diff --git a/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ab1245de7e0dd67796d94825bcb7ddf218feab03 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2750358923ea690fdffb960d5cc0289991e13f76a75ff1257a7f4e02d5740d4c +size 3039664 diff --git a/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1b82abe879530a415bbb7c14d82cc9d5252ea5cf --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27e990dec80e3c9000177721a9b3349caceb301d729da5b1cae24825ab1895a0 +size 317541 diff --git a/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b2610acbc6f0ad39e9c62bfd8a10667d32738433 --- /dev/null +++ 
b/merge_bench/outputs/._merged1_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8eb60ab95198a1f3d8070a07ca2c50de788f7800d0757ee1ab612c518bd4e889 +size 1154585 diff --git a/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a90d260cca697b2bc62de48c7464e8e8a3b242a --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c1b9d494113a592be2120d9385eb938bdd1338a6c599f0f5b0b08957bccd2a9 +size 3516063 diff --git a/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cd68315efb64fb8ffe279cb5f264fc35f0783cb8 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48d43fa45dd4ee17b7c9b13336d6fd31db220cd5c08e5fa19bbf5e14e97b2829 +size 8166660 diff --git a/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2e9f244284aca506efefcb1fccc97c9da8d8039d --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0945c450272bb9900b014824bb9ac264b25c0e5bae7be00b1443c4ca1db9ac03 +size 2865541 diff --git a/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5d27dc52de4e0b81e1459a9253733d99c59132d0 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a99f716bc455fb5a0d21d2a5cb9288f2125a0bf886a3bc1139d0241a90ba8284 +size 3035700 diff --git a/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1f1afba291797670a48884dc6dc5c3b1562a65fa --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:8b5b0554738d67e8df284ad3afced5670cbd5f614bd28d941f7fdd32fe1557d5 +size 316236 diff --git a/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c9691a9fb715085d66189d2f59f9cf2384a45a5c --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_6/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ed3f00c897402f96dc63776ae682b648c8cea840022efe150bf8044ac9e3f3e +size 1154067 diff --git a/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f596f39e420713228f52861ad784472f63368da3 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8057e91af6f0aad6c1e0090bb8fd523656a12ae7a2f710e8610106a06ccf3a2 +size 3521820 diff --git a/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b3ee129507741fdc4bbf6540233fd00c22f0829e --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd66dc03fe0928fc083040ea19c464f39d10a05f3bee1ba8247a20a830bb5c65 +size 8160348 diff --git a/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ebdaa1bc9a12e6ed5e61501f18e3eb10a16d346a --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be125bd7f00a3e0c4be1b87d008a3ad16f49e82d21ccafb91da81ee56435b4a1 +size 2864150 diff --git a/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ca9609429577835fff325dd00eb999e1375166b6 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f56c962512997cfed1355671e422ccd86c9285672007c814ac7f779241942d2 +size 3037407 diff 
--git a/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ef113863de3756e5f17139e84b1d65e3a80389cb --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77594b0e12c7d0234c645e2fdca03d321aeaa93b13da98983a83bce31e0468a2 +size 316669 diff --git a/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..746b9c596e193cc9656e3a6c30d9724a2a005eec --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e34145e2f7d712791f47a16a4033824297d774afff111bb9e5ab2deb1b23b0f +size 1148853 diff --git a/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e0308dd8a0e37a1f26ae4fce8bf9e99ed665833d --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc6be1d156952f72ea5b9af2f2c3cb451272788ab6db348642f8e1f79c1f14de +size 3517214 diff --git a/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..93c806caceaaa8d4afd4f966cfabe77e6b0c6a3f --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d57fe357241e72730aad767b2820adb03fba2f9133913e7f6d8877410caf3873 +size 8157712 diff --git a/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a58859e0034c5883c5a59dda39dc7cd278c79cf5 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98fdc5eba143f52736aa70e69f8417d6a9a95d63ce5673124ba015b6bac4f47a +size 2868302 diff --git a/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet 
b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..836454a2b8b60de28c2a875ed25a18196145835d --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06ba2eee74d7783394fe9a82dbfc814be05d00ea57e8ffb85b711fe175d10091 +size 3038791 diff --git a/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b8ab20eb272fbcdb21fe6816fb6e45229ee6afd5 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a0a90030fdcbe80410a6757f82b3981856cac6a21c53d48a0abaf8edd40b2b2 +size 316932 diff --git a/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..711f0d3951184ee9b5a12eeb272ec17761d53ca3 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_8/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d10225c9ef2d37254b8e5c0de37b79148919dac06cefeb8ec90d999beb66a18b +size 1156969 diff --git a/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f5af9471641ef80de1fe67c562aef2c0086a6585 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d567c32a13717b24a8316b1f31f143e5a2ea1f50b15c9aefd1960413404461a7 +size 3517408 diff --git a/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4dfec3268818a3700e28e28792cc21e30381788b --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49a213aa22afe36677f766355e1c8ee2603000e80761fc6a8446d170875eab3e +size 8159168 diff --git a/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 
100644 index 0000000000000000000000000000000000000000..591fff4b945cdfd3c77bff105bc3578d9eb73b6c --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8093190acdc0a99c774e438aff13fdb1efb4801a72c624c7d06a54166cb4426 +size 2858602 diff --git a/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..65e93cb3e09cb50465eb6b2d6c8bd5e634e8addd --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4ce4a72a9eac29c5d851707090bd78a11e70b4041372c06562542671046cd61 +size 3039383 diff --git a/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fba93856a58225b2bcf93812a4358c1c5fb81e86 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4b6d1412321a7e003641b538fac981ea521b36f484a808df0c071cd4a48bda6 +size 317576 diff --git a/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5d76ca91ff19b1cbb31f423e40f3e7ea346845d7 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ee54d8d864932b798c859f0e9f08515d80762c037e4f98394800649b650fe04 +size 1156052 diff --git a/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a8a5ca392872b270dd3a62025ce0f40b6362d99d --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c185652bf9386daa52cbbe8d8c3d2cd38c7e4a35ced8eff0207dc7b7c98be41 +size 3514100 diff --git a/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..66b12bf016a08fc8fdb64d8e27ab8558a94c3223 --- /dev/null +++ 
b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d588fd542b622964ca779988629e706e13b92bbe2d34f41dc55c290032123f3 +size 8162160 diff --git a/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..924d4512e637e05c962edfc46a9abf48a38c4f42 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76fd3a20e35aea81c6de091b848465247494a8f143db490924ff8c6b1aecba50 +size 2861031 diff --git a/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b55cbf0ef6d563cd111c881c114a3cddf9e839ba --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:097eb42a3910054b6dec719b1d84876bd0412d85adcc782c06404545aabe99dd +size 3035131 diff --git a/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ef511e4aa9c7883638d2b7df080a65caa1b0c670 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f833527f228fe43e5d113b7092375892eeec4f74767644e7db80ac7ecbaa68a +size 316590 diff --git a/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..43c5112d3b460e6d769710b787bcdc2b3d5c51eb --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b526352d76bcf9d26d7f94cd468f6f0c568c7a51d2b746aadc6a85f0c35db65c +size 1153555 diff --git a/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4cc8ff68f60fd57bcebb3aaafd489ae93e4766fc --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b0477bedd21bfc8c8ecbca7d82294ebc58a7afed9f1039511425a4e80c8b61c1 +size 3527104 diff --git a/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a1270a1a461e133bab022489defad0fb3536abc7 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc82d3796974816725d460223170cffa715815e601f7951d366cadc261dad12f +size 8158341 diff --git a/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cebfa5cc5582fc2f1c1b70590741fe6522baa37d --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e28573853da031d2d2f3543011962c646493d4b8f22b66d6bdbf04bb2d7c42d +size 2866853 diff --git a/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c8c558dfaa3f996536a7d71de4faf4a60c2276d8 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6807e916552ad623727753fc2ad2a6ceeed0e38cd70b204d1c0c30721411413b +size 3040552 diff --git a/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..898fcb52243ec5d4021828375183a75e71070743 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5581b303b94b3e8535cf4801acd99f76ef0037525ebb85c46eeb724f412a0c1d +size 317207 diff --git a/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..58e762d61b489c0c79e34ceb02bae25f83ca7007 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfa1050a91c5b385da0e63e7484d91b438219e70f45fdffeef46336a71ac47cc +size 1155776 diff --git 
a/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..461c8565593a2fe3c12a2126caf0cd6018b3d7be --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02ffffdc5389e3479404a478e4cf5b78e67f3b381ad70c12afc63ca25964d641 +size 3524566 diff --git a/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..19b43fc41f0b3df55b153f68e61c8b5e59c5c329 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03bc808fd45043f14b371b351f0a5e3a14640a0126c6b90e3596a6419ef901b0 +size 8160504 diff --git a/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8c8138677c5a674edf0e1843812a65a0a3cb3662 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e6290544c4081def578e94f2ff13f88b601f9974440f5e3dd0f478b3add8f66 +size 2858666 diff --git a/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cd699f1167abfcfd72cf3858ffb1dcac28f496c8 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d62be38de69dd2dd72652e8737a0ff9ceb21f5c2ab00a8f6a438f6bb2b72b24 +size 3039977 diff --git a/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4f24380004853077f6fffb573b5e5be4079d8ffd --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c5f8466d6a16f26efe39b1b9831edcc78741f24884cd17a786cb030244b7533 +size 317600 diff --git a/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet 
b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b46f1abfa466918f29aa1c24e4848c3ee625ede9 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67cc7ce85e27883fa73732b2ac4005a6ed48b00ab62e9f0067f2b286b92c2410 +size 1150304 diff --git a/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..225a0abde6118f8ddf14c03f067d4d2e6feef8e3 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:106fbcfd69ca8b09e03b03079314f16cfa0e6ec095344f3132e1e04c81dfd963 +size 3527459 diff --git a/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..54e0125b46122b9ca460896382b9ead2cc0d9edb --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6fe23ebe935e1fa28cf1a5d7cc2a3ce761e0f0e1179e41c097970560f189127 +size 8164411 diff --git a/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..308ffd8338ef6f803ed13f43264fba619544f558 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f665bea2ff087853375c2463961725f259b96c9fcc34aa4fea527c054691fa9 +size 2866130 diff --git a/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..438aa3f89897a4aab527083df9b628356ab8b2f1 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77095039a700963f2634c2e6296cfc3fe48fe3180d8ab13790c6672fb748c051 +size 3039206 diff --git a/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..f6ac442e3e13299568748e5e68cd8458001d0512 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2de5d2195a8b3185e50609a2ad0fd5f919d61e5690837cd7215eb964311d8ae6 +size 317166 diff --git a/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..53bee0e387749455414ba0b88284da87e76f0ca8 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d41938b0f1b0bb8faa7e7d40d008d567a9d03184ca4eeaa8eeb8cb2a8bbb8350 +size 1158452 diff --git a/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7f18b23199bed59aed9fd9ad3cafdec24fe825a5 --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53522d1bfcb1c2a908a1b00a941f9273a7dc8accafec3cd9eaa2a6f47655fcd7 +size 3520795 diff --git a/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ecec43c4a961a554c5ca4f4ca8c7d5a1f4e16bff --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2103ce8b8b64e53e482a1c7738f89c9c9560f9f7a34a486e6e3e24625c81a2e +size 8169993 diff --git a/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bd0d8ea042ede73ffd37d9217c1f2d7c1bc4534c --- /dev/null +++ b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36f0981f671900a54ef2e0d2dfcfaefec0112c87ce07803ccfd8e1921df7658a +size 2867102 diff --git a/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e8ae4d9b954fc6155feae6b72c73ef90e291e1d3 --- /dev/null +++ 
b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb089ef07b31bcf32b477ef22948f9276b5e2ea5c307750c03a3c31285e753a2
+size 3042008
diff --git a/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..962adc3b29ec1377137fc0c35d881ca3937a272a
--- /dev/null
+++ b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e42315165679413e05799db3d2ec2229f94a7bb5a42f136fa96c186512ba1488
+size 316601
diff --git a/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..423883752cf37ecdb1012db26f1191094981fc84
--- /dev/null
+++ b/merge_bench/outputs/._merged1_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e5bec2cf0d45f5d0dd91b076ee3b75f9ebe1c8ecd10363b86b2b53d9719104f
+size 1149153
diff --git a/merge_bench/results/._merged1_llama_darelinear_1/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_darelinear_1/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..de095909841f82b41638545763de2bd3604d6c64
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_darelinear_1/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.49375,
+      "sem_stderr": 0.027992438382232313
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.35537190082644626,
+      "sem_stderr": 0.0436923632657398
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.775,
+      "math_pass@1:1_samples_stderr": 0.06686668711812967
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.6010498687664042,
+      "sem_stderr": 0.025120180816809216
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 0.7225950782997763,
+      "math_pass@1:1_samples_stderr": 0.021200062987295133
+    },
+    "mm|arc_easy|0": {
+      "sem": 0.6304118268215417,
+      "sem_stderr": 0.015693710628893888
+    },
+    "all": {
+      "sem": 0.5201458991035981,
+      "sem_stderr": 0.028124673273418804,
+      "math_pass@1:1_samples": 0.7487975391498882,
+      "math_pass@1:1_samples_stderr": 0.044033375052712403
+    }
+  },
+  "versions": {
+    "mm|aime24|0": 3,
+    "mm|arc_challenge|0": 0,
+    "mm|arc_easy|0": 0,
+    "mm|commonsenseqa|0": 0,
+    "mm|gpqa_diamond|0": 2,
+    "mm|gsm8k|0": 0,
+    "mm|math_500|0": 3,
+    "mm|mmlu_pro|0": 0,
+    "mm|truthfulqa|0": 0
+  },
+  "size": {
+    "mm|commonsenseqa|0": 320,
+    "mm|truthfulqa|0": 121,
+    "mm|math_500|0": 40,
+    "mm|arc_challenge|0": 381,
+    "mm|gsm8k|0": 447,
+    "mm|arc_easy|0": 947
+  }
+}
\ No newline at end of file
diff --git a/merge_bench/results/._merged1_llama_darelinear_3/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_darelinear_3/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..6de39c538da2346aa12eecc6d1792dd1048584e2
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_darelinear_3/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.49375,
+      "sem_stderr": 0.02799243838223232
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.2727272727272727,
+      "sem_stderr": 0.04065578140908705
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.875,
+      "math_pass@1:1_samples_stderr": 0.05295740910852021
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.6036745406824147,
+      "sem_stderr": 0.02509201957502416
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 0.7360178970917226,
+      "math_pass@1:1_samples_stderr": 0.020871996830370478
+    },
+    "mm|arc_easy|0": {
+      "sem": 0.6314677930306231,
+      "sem_stderr": 0.01568439453467366
+    },
+    "all": {
+      "sem": 0.5004049016100777,
+      "sem_stderr": 0.027356158475254296,
+      "math_pass@1:1_samples": 0.8055089485458613,
+      "math_pass@1:1_samples_stderr": 0.036914702969445344
+    }
+  },
+  "versions": {
+    "mm|aime24|0": 3,
+    "mm|arc_challenge|0": 0,
+    "mm|arc_easy|0": 0,
+    "mm|commonsenseqa|0": 0,
+    "mm|gpqa_diamond|0": 2,
+    "mm|gsm8k|0": 0,
+    "mm|math_500|0": 3,
+    "mm|mmlu_pro|0": 0,
+    "mm|truthfulqa|0": 0
+  },
+  "size": {
+    "mm|commonsenseqa|0": 320,
+    "mm|truthfulqa|0": 121,
+    "mm|math_500|0": 40,
+    "mm|arc_challenge|0": 381,
+    "mm|gsm8k|0": 447,
+    "mm|arc_easy|0": 947
+  }
+}
\ No newline at end of file
diff --git a/merge_bench/results/._merged1_llama_darelinear_5/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_darelinear_5/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..c7e9f02b4c92a8cffa7d105b0dbdec951602e4f0
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_darelinear_5/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.484375,
+      "sem_stderr": 0.027980952958187033
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.33884297520661155,
+      "sem_stderr": 0.04320767807536669
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.875,
+      "math_pass@1:1_samples_stderr": 0.05295740910852021
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.5853018372703412,
+      "sem_stderr": 0.02527343084609575
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 0.7248322147651006,
+      "math_pass@1:1_samples_stderr": 0.021147065396798177
+    },
+    "mm|arc_easy|0": {
+      "sem": 0.6335797254487856,
+      "sem_stderr": 0.01566551999965003
+    },
+    "all": {
+      "sem": 0.5105248844814346,
+      "sem_stderr": 0.028031895469824875,
+      "math_pass@1:1_samples": 0.7999161073825503,
+      "math_pass@1:1_samples_stderr": 0.037052237252659194
+    }
+  },
+  "versions": {
+    "mm|aime24|0": 3,
+    "mm|arc_challenge|0": 0,
+    "mm|arc_easy|0": 0,
+    "mm|commonsenseqa|0": 0,
+    "mm|gpqa_diamond|0": 2,
+    "mm|gsm8k|0": 0,
+    "mm|math_500|0": 3,
+    "mm|mmlu_pro|0": 0,
+    "mm|truthfulqa|0": 0
+  },
+  "size": {
+    "mm|commonsenseqa|0": 320,
+    "mm|truthfulqa|0": 121,
+    "mm|math_500|0": 40,
+    "mm|arc_challenge|0": 381,
+    "mm|gsm8k|0": 447,
+    "mm|arc_easy|0": 947
+  }
+}
\ No newline at end of file
diff --git a/merge_bench/results/._merged1_llama_darelinear_7/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_darelinear_7/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..0d4f4560e01f467387a85b73edbbf59a1d542a74
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_darelinear_7/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.54375,
+      "sem_stderr": 0.02788725270865465
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.35537190082644626,
+      "sem_stderr": 0.04369236326573981
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.775,
+      "math_pass@1:1_samples_stderr": 0.06686668711812967
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.5984251968503937,
+      "sem_stderr": 0.025147589642002106
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 0.7248322147651006,
+      "math_pass@1:1_samples_stderr": 0.021147065396798177
+    },
+    "mm|arc_easy|0": {
+      "sem": 0.6451953537486801,
+      "sem_stderr": 0.015555884023359098
+    },
+    "all": {
+      "sem": 0.5356856128563801,
+      "sem_stderr": 0.028070772409938917,
+      "math_pass@1:1_samples": 0.7499161073825503,
+      "math_pass@1:1_samples_stderr": 0.044006876257463926
+    }
+  },
+  "versions": {
+    "mm|aime24|0": 3,
+    "mm|arc_challenge|0": 0,
+    "mm|arc_easy|0": 0,
+    "mm|commonsenseqa|0": 0,
+    "mm|gpqa_diamond|0": 2,
+    "mm|gsm8k|0": 0,
+    "mm|math_500|0": 3,
+    "mm|mmlu_pro|0": 0,
+    "mm|truthfulqa|0": 0
+  },
+  "size": {
+    "mm|commonsenseqa|0": 320,
+    "mm|truthfulqa|0": 121,
+    "mm|math_500|0": 40,
+    "mm|arc_challenge|0": 381,
+    "mm|gsm8k|0": 447,
+    "mm|arc_easy|0": 947
+  }
+}
\ No newline at end of file
diff --git a/merge_bench/results/._merged1_llama_darelinear_9/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_darelinear_9/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..206e4b785c2b0a28f8740b1421d6f2e478f6ff43
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_darelinear_9/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.5125,
+      "sem_stderr": 0.02798587585995666
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.33884297520661155,
+      "sem_stderr": 0.04320767807536669
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.725,
+      "math_pass@1:1_samples_stderr": 0.0714995069016527
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.5905511811023622,
+      "sem_stderr": 0.02522532580776334
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 0.7136465324384788,
+      "math_pass@1:1_samples_stderr": 0.02140549985783347
+    },
+    "mm|arc_easy|0": {
+      "sem": 0.6367476240760296,
+      "sem_stderr": 0.015636600127289867
+    },
+    "all": {
+      "sem": 0.5196604450962509,
+      "sem_stderr": 0.02801386996759414,
+      "math_pass@1:1_samples": 0.7193232662192394,
+      "math_pass@1:1_samples_stderr": 0.04645250337974308
+    }
+  },
+  "versions": {
+    "mm|aime24|0": 3,
+    "mm|arc_challenge|0": 0,
+    "mm|arc_easy|0": 0,
+    "mm|commonsenseqa|0": 0,
+    "mm|gpqa_diamond|0": 2,
+    "mm|gsm8k|0": 0,
+    "mm|math_500|0": 3,
+    "mm|mmlu_pro|0": 0,
+    "mm|truthfulqa|0": 0
+  },
+  "size": {
+    "mm|commonsenseqa|0": 320,
+    "mm|truthfulqa|0": 121,
+    "mm|math_500|0": 40,
+    "mm|arc_challenge|0": 381,
+    "mm|gsm8k|0": 447,
+    "mm|arc_easy|0": 947
+  }
+}
\ No newline at end of file
diff --git a/merge_bench/results/._merged1_llama_linear_1/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_linear_1/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..f0b64a0703d09cd9c9c768959167463b404221fc
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_linear_1/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.478125,
+      "sem_stderr": 0.027967820983765136
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.36363636363636365,
+      "sem_stderr": 0.043913262867240704
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.725,
+      "math_pass@1:1_samples_stderr": 0.0714995069016527
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.6036745406824147,
+      "sem_stderr": 0.025092019575024157
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 0.6890380313199105,
+      "math_pass@1:1_samples_stderr": 0.021918347389232904
+    },
+    "mm|arc_easy|0": {
+      "sem": 0.6335797254487856,
+      "sem_stderr": 0.01566551999965003
+    },
+    "all": {
+      "sem": 0.519753907441891,
+      "sem_stderr": 0.028159655856420007,
+      "math_pass@1:1_samples": 0.7070190156599552,
+      "math_pass@1:1_samples_stderr": 0.0467089271454428
+    }
+  },
+  "versions": {
+    "mm|aime24|0": 3,
+    "mm|arc_challenge|0": 0,
+    "mm|arc_easy|0": 0,
+    "mm|commonsenseqa|0": 0,
+    "mm|gpqa_diamond|0": 2,
+    "mm|gsm8k|0": 0,
+    "mm|math_500|0": 3,
+    "mm|mmlu_pro|0": 0,
+    "mm|truthfulqa|0": 0
+  },
+  "size": {
+    "mm|commonsenseqa|0": 320,
+    "mm|truthfulqa|0": 121,
+    "mm|math_500|0": 40,
+    "mm|arc_challenge|0": 381,
+    "mm|gsm8k|0": 447,
+    "mm|arc_easy|0": 947
+  }
+}
\ No newline at end of file
diff --git a/merge_bench/results/._merged1_llama_linear_3/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_linear_3/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..925450a562024c71e15594cec90cb19fcdf9c600
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_linear_3/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.50625,
+      "sem_stderr": 0.02799243838223232
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.2892561983471074,
+      "sem_stderr": 0.041391127276354626
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.9,
+      "math_pass@1:1_samples_stderr": 0.04803844614152613
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.5879265091863517,
+      "sem_stderr": 0.025249748773231783
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 0.7472035794183445,
+      "math_pass@1:1_samples_stderr": 0.020579627171922518
+    },
+    "mm|arc_easy|0": {
+      "sem": 0.6135163674762407,
+      "sem_stderr": 0.01583190086185407
+    },
+    "all": {
+      "sem": 0.49923726875242497,
+      "sem_stderr": 0.0276163038234182,
+      "math_pass@1:1_samples": 0.8236017897091723,
+      "math_pass@1:1_samples_stderr": 0.03430903665672432
+    }
+  },
+  "versions": {
+    "mm|aime24|0": 3,
+    "mm|arc_challenge|0": 0,
+    "mm|arc_easy|0": 0,
+    "mm|commonsenseqa|0": 0,
+    "mm|gpqa_diamond|0": 2,
+    "mm|gsm8k|0": 0,
+    "mm|math_500|0": 3,
+    "mm|mmlu_pro|0": 0,
+    "mm|truthfulqa|0": 0
+  },
+  "size": {
+    "mm|commonsenseqa|0": 320,
+    "mm|truthfulqa|0": 121,
+    "mm|math_500|0": 40,
+    "mm|arc_challenge|0": 381,
+    "mm|gsm8k|0": 447,
+    "mm|arc_easy|0": 947
+  }
+}
\ No newline at end of file
diff --git a/merge_bench/results/._merged1_llama_linear_5/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_linear_5/results_2025-06-23T01-52-10.258150.json
new file mode 100644
index 0000000000000000000000000000000000000000..45ad9eb00440b02c120140ca1ca6cf7aba41c4a2
--- /dev/null
+++ b/merge_bench/results/._merged1_llama_linear_5/results_2025-06-23T01-52-10.258150.json
@@ -0,0 +1,53 @@
+{
+  "results": {
+    "mm|commonsenseqa|0": {
+      "sem": 0.51875,
+      "sem_stderr": 0.027974934901776306
+    },
+    "mm|truthfulqa|0": {
+      "sem": 0.34710743801652894,
+      "sem_stderr": 0.04345724570292534
+    },
+    "mm|math_500|0": {
+      "math_pass@1:1_samples": 0.775,
+      "math_pass@1:1_samples_stderr": 0.06686668711812967
+    },
+    "mm|arc_challenge|0": {
+      "sem": 0.6115485564304461,
+      "sem_stderr": 0.0250029955867546
+    },
+    "mm|gsm8k|0": {
+      "math_pass@1:1_samples": 
0.7136465324384788, + "math_pass@1:1_samples_stderr": 0.02140549985783347 + }, + "mm|arc_easy|0": { + "sem": 0.6251319957761352, + "sem_stderr": 0.01573908506764194 + }, + "all": { + "sem": 0.5256344975557776, + "sem_stderr": 0.028043565314774546, + "math_pass@1:1_samples": 0.7443232662192394, + "math_pass@1:1_samples_stderr": 0.044136093487981566 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_llama_linear_7/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_linear_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..77a809d891f0f3acb15c342745c10f5a573876a2 --- /dev/null +++ b/merge_bench/results/._merged1_llama_linear_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.528125, + "sem_stderr": 0.027950302087016623 + }, + "mm|truthfulqa|0": { + "sem": 0.34710743801652894, + "sem_stderr": 0.04345724570292535 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.675, + "math_pass@1:1_samples_stderr": 0.07499999999999998 + }, + "mm|arc_challenge|0": { + "sem": 0.5669291338582677, + "sem_stderr": 0.02541862615034513 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7225950782997763, + "math_pass@1:1_samples_stderr": 0.021200062987295133 + }, + "mm|arc_easy|0": { + "sem": 0.6209081309398099, + "sem_stderr": 0.015773945650583893 + }, + "all": { + "sem": 0.5157674257036515, + "sem_stderr": 0.028150029897717747, + "math_pass@1:1_samples": 0.6987975391498882, + "math_pass@1:1_samples_stderr": 0.048100031493647555 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_llama_linear_9/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_linear_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..bc16aa489752e3687e958160eecfe533e614b9cf --- /dev/null +++ b/merge_bench/results/._merged1_llama_linear_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.4875, + "sem_stderr": 0.02798587585995665 + }, + "mm|truthfulqa|0": { + "sem": 0.2975206611570248, + "sem_stderr": 0.04173349148083499 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.6141732283464567, + "sem_stderr": 0.024971798510833206 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7248322147651006, + "math_pass@1:1_samples_stderr": 0.021147065396798184 + }, + "mm|arc_easy|0": { + "sem": 0.6282998944033791, + "sem_stderr": 0.015712101189217512 + }, + "all": { + "sem": 0.5068734459767151, + "sem_stderr": 
0.02760081676021059, + "math_pass@1:1_samples": 0.7874161073825503, + "math_pass@1:1_samples_stderr": 0.039162126443242365 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_llama_ties_1/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_ties_1/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..536fe72b09c7cd8d21ae72cc654d8eefb58cda1e --- /dev/null +++ b/merge_bench/results/._merged1_llama_ties_1/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.503125, + "sem_stderr": 0.027994078772422815 + }, + "mm|truthfulqa|0": { + "sem": 0.2975206611570248, + "sem_stderr": 0.04173349148083499 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.875, + "math_pass@1:1_samples_stderr": 0.05295740910852021 + }, + "mm|arc_challenge|0": { + "sem": 0.6062992125984252, + "sem_stderr": 0.02506310338037055 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7449664429530202, + "math_pass@1:1_samples_stderr": 0.02063951985587468 + }, + "mm|arc_easy|0": { + "sem": 0.6335797254487856, + "sem_stderr": 0.01566551999965003 + }, + "all": { + "sem": 0.5101311498010589, + "sem_stderr": 0.0276140484083196, + "math_pass@1:1_samples": 0.8099832214765101, + "math_pass@1:1_samples_stderr": 0.036798464482197445 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_llama_ties_3/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_ties_3/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..6d3b0f854d6d111cd8c7a6fb9493ab4818f37d67 --- /dev/null +++ b/merge_bench/results/._merged1_llama_ties_3/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.478125, + "sem_stderr": 0.027967820983765136 + }, + "mm|truthfulqa|0": { + "sem": 0.2809917355371901, + "sem_stderr": 0.04103203830514512 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.875, + "math_pass@1:1_samples_stderr": 0.05295740910852021 + }, + "mm|arc_challenge|0": { + "sem": 0.5774278215223098, + "sem_stderr": 0.02534005215663172 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7651006711409396, + "math_pass@1:1_samples_stderr": 0.02007394593229935 + }, + "mm|arc_easy|0": { + "sem": 0.6451953537486801, + "sem_stderr": 0.015555884023359104 + }, + "all": { + "sem": 0.495434977702045, + "sem_stderr": 0.02747394886722527, + "math_pass@1:1_samples": 0.8200503355704698, + "math_pass@1:1_samples_stderr": 0.03651567752040978 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + 
"mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_llama_ties_5/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_ties_5/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..629050c2f0f847582767cd79178e91e91aa5b3f0 --- /dev/null +++ b/merge_bench/results/._merged1_llama_ties_5/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.503125, + "sem_stderr": 0.02799407877242281 + }, + "mm|truthfulqa|0": { + "sem": 0.3140495867768595, + "sem_stderr": 0.042369647530410184 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203486 + }, + "mm|arc_challenge|0": { + "sem": 0.6220472440944882, + "sem_stderr": 0.024873599945204095 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7472035794183445, + "math_pass@1:1_samples_stderr": 0.020579627171922518 + }, + "mm|arc_easy|0": { + "sem": 0.6304118268215417, + "sem_stderr": 0.01569371062889389 + }, + "all": { + "sem": 0.5174084144232224, + "sem_stderr": 0.027732759219232746, + "math_pass@1:1_samples": 0.7736017897091723, + "math_pass@1:1_samples_stderr": 0.04231544434697869 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_llama_ties_7/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_ties_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..854cf4ca920fa6d5e7bfdeffdb5d0ca8c9d78dfd --- /dev/null +++ b/merge_bench/results/._merged1_llama_ties_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.50625, + "sem_stderr": 0.027992438382232313 + }, + "mm|truthfulqa|0": { + "sem": 0.2892561983471074, + "sem_stderr": 0.041391127276354626 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.0608434308444476 + }, + "mm|arc_challenge|0": { + "sem": 0.5774278215223098, + "sem_stderr": 0.025340052156631723 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.756152125279642, + "math_pass@1:1_samples_stderr": 0.02033277461576782 + }, + "mm|arc_easy|0": { + "sem": 0.6282998944033791, + "sem_stderr": 0.015712101189217516 + }, + "all": { + "sem": 0.500308478568199, + "sem_stderr": 0.027608929751109047, + "math_pass@1:1_samples": 0.790576062639821, + "math_pass@1:1_samples_stderr": 0.04058810273010771 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + 
"mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_llama_ties_9/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_llama_ties_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..84a086fe282e5894a25d52515ed07db5f37156a5 --- /dev/null +++ b/merge_bench/results/._merged1_llama_ties_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.5, + "sem_stderr": 0.027994625547792713 + }, + "mm|truthfulqa|0": { + "sem": 0.3305785123966942, + "sem_stderr": 0.04294340845212094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.6141732283464567, + "sem_stderr": 0.024971798510833206 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7472035794183445, + "math_pass@1:1_samples_stderr": 0.020579627171922514 + }, + "mm|arc_easy|0": { + "sem": 0.6399155227032735, + "sem_stderr": 0.015606946957923142 + }, + "all": { + "sem": 0.5211668158616061, + "sem_stderr": 0.0278791948671675, + "math_pass@1:1_samples": 0.7986017897091722, + "math_pass@1:1_samples_stderr": 0.03887840733080453 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_darelinear_1/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_darelinear_1/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..17d479193d6e46ed84f518dab2fb6309a10c432b --- /dev/null +++ b/merge_bench/results/._merged1_phi_darelinear_1/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.525, + "sem_stderr": 0.027959610367675462 + }, + "mm|truthfulqa|0": { + "sem": 0.32231404958677684, + "sem_stderr": 0.04266416363352168 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203487 + }, + "mm|arc_challenge|0": { + "sem": 0.6194225721784777, + "sem_stderr": 0.024907103682023018 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7404921700223713, + "math_pass@1:1_samples_stderr": 0.0207571650689442 + }, + "mm|arc_easy|0": { + "sem": 0.6367476240760296, + "sem_stderr": 0.015636600127289867 + }, + "all": { + "sem": 0.525871061460321, + "sem_stderr": 0.02779186945262751, + "math_pass@1:1_samples": 0.7702460850111856, + "math_pass@1:1_samples_stderr": 0.04240421329548953 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_darelinear_3/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_darelinear_3/results_2025-06-23T01-52-10.258150.json new file 
mode 100644 index 0000000000000000000000000000000000000000..ef24b16eacd56838b1d48f106d0e8349aab50d17 --- /dev/null +++ b/merge_bench/results/._merged1_phi_darelinear_3/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.525, + "sem_stderr": 0.02795961036767547 + }, + "mm|truthfulqa|0": { + "sem": 0.3305785123966942, + "sem_stderr": 0.04294340845212094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.875, + "math_pass@1:1_samples_stderr": 0.05295740910852021 + }, + "mm|arc_challenge|0": { + "sem": 0.5800524934383202, + "sem_stderr": 0.025318580565014428 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7225950782997763, + "math_pass@1:1_samples_stderr": 0.021200062987295133 + }, + "mm|arc_easy|0": { + "sem": 0.6209081309398099, + "sem_stderr": 0.015773945650583893 + }, + "all": { + "sem": 0.5141347841937061, + "sem_stderr": 0.027998886258848683, + "math_pass@1:1_samples": 0.7987975391498882, + "math_pass@1:1_samples_stderr": 0.03707873604790767 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_darelinear_5/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_darelinear_5/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..2ccc207b58e8956ec2b56f7a346d383be9a25c95 --- /dev/null +++ b/merge_bench/results/._merged1_phi_darelinear_5/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.50625, + "sem_stderr": 0.02799243838223232 + }, + "mm|truthfulqa|0": { + "sem": 0.34710743801652894, + "sem_stderr": 0.04345724570292535 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.725, + "math_pass@1:1_samples_stderr": 0.0714995069016527 + }, + "mm|arc_challenge|0": { + "sem": 0.6089238845144357, + "sem_stderr": 0.02503342961670324 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.738255033557047, + "math_pass@1:1_samples_stderr": 0.020814929694141178 + }, + "mm|arc_easy|0": { + "sem": 0.6314677930306231, + "sem_stderr": 0.01568439453467366 + }, + "all": { + "sem": 0.5234372788903969, + "sem_stderr": 0.028041877059133642, + "math_pass@1:1_samples": 0.7316275167785236, + "math_pass@1:1_samples_stderr": 0.04615721829789694 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_darelinear_7/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_darelinear_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..fa3d3c1b107f2d0ddd814ed7539a61fd43dc604a --- /dev/null +++ b/merge_bench/results/._merged1_phi_darelinear_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + 
"results": { + "mm|commonsenseqa|0": { + "sem": 0.490625, + "sem_stderr": 0.027989704184941004 + }, + "mm|truthfulqa|0": { + "sem": 0.34710743801652894, + "sem_stderr": 0.043457245702925335 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.775, + "math_pass@1:1_samples_stderr": 0.06686668711812967 + }, + "mm|arc_challenge|0": { + "sem": 0.5931758530183727, + "sem_stderr": 0.02520015979556166 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7315436241610739, + "math_pass@1:1_samples_stderr": 0.020984061328633338 + }, + "mm|arc_easy|0": { + "sem": 0.6219640971488912, + "sem_stderr": 0.015765349881245096 + }, + "all": { + "sem": 0.5132180970459482, + "sem_stderr": 0.028103114891168277, + "math_pass@1:1_samples": 0.753271812080537, + "math_pass@1:1_samples_stderr": 0.043925374223381504 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_darelinear_9/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_darelinear_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..7175979ecefda3ad35a4bfd16830edf1925e021a --- /dev/null +++ b/merge_bench/results/._merged1_phi_darelinear_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.51875, + "sem_stderr": 0.0279749349017763 + }, + "mm|truthfulqa|0": { + "sem": 0.3305785123966942, + "sem_stderr": 0.04294340845212094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203486 + }, + "mm|arc_challenge|0": { + "sem": 0.6141732283464567, + "sem_stderr": 0.024971798510833216 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7695749440715883, + "math_pass@1:1_samples_stderr": 0.01993989564680271 + }, + "mm|arc_easy|0": { + "sem": 0.6378035902851109, + "sem_stderr": 0.015626797418969232 + }, + "all": { + "sem": 0.5253263327570654, + "sem_stderr": 0.02787923482092492, + "math_pass@1:1_samples": 0.7847874720357941, + "math_pass@1:1_samples_stderr": 0.04199557858441878 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_linear_1/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_linear_1/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..51ee38e6949fff8a437736acec7b4fa1b0adddca --- /dev/null +++ b/merge_bench/results/._merged1_phi_linear_1/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.5125, + "sem_stderr": 0.02798587585995666 + }, + "mm|truthfulqa|0": { + "sem": 0.33884297520661155, + "sem_stderr": 0.04320767807536669 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 
0.725, + "math_pass@1:1_samples_stderr": 0.07149950690165272 + }, + "mm|arc_challenge|0": { + "sem": 0.5905511811023622, + "sem_stderr": 0.02522532580776334 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7136465324384788, + "math_pass@1:1_samples_stderr": 0.02140549985783347 + }, + "mm|arc_easy|0": { + "sem": 0.6367476240760296, + "sem_stderr": 0.015636600127289867 + }, + "all": { + "sem": 0.5196604450962509, + "sem_stderr": 0.02801386996759414, + "math_pass@1:1_samples": 0.7193232662192394, + "math_pass@1:1_samples_stderr": 0.04645250337974309 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_linear_3/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_linear_3/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..f4ab2d5a385f7ccb40ec4c9b05fa2c80d31044ae --- /dev/null +++ b/merge_bench/results/._merged1_phi_linear_3/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.528125, + "sem_stderr": 0.027950302087016623 + }, + "mm|truthfulqa|0": { + "sem": 0.33884297520661155, + "sem_stderr": 0.0432076780753667 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203486 + }, + "mm|arc_challenge|0": { + "sem": 0.6115485564304461, + "sem_stderr": 0.0250029955867546 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7628635346756152, + "math_pass@1:1_samples_stderr": 0.0201398007512511 + }, + "mm|arc_easy|0": { + "sem": 0.6314677930306231, + "sem_stderr": 0.01568439453467366 + }, + "all": { + "sem": 0.5274960811669201, + "sem_stderr": 0.027961342570952896, + "math_pass@1:1_samples": 0.7814317673378076, + "math_pass@1:1_samples_stderr": 0.04209553113664298 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_linear_5/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_linear_5/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..e55a80da1ad1ed14939795d0b10d560c9e05f581 --- /dev/null +++ b/merge_bench/results/._merged1_phi_linear_5/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.51875, + "sem_stderr": 0.0279749349017763 + }, + "mm|truthfulqa|0": { + "sem": 0.36363636363636365, + "sem_stderr": 0.043913262867240704 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.75, + "math_pass@1:1_samples_stderr": 0.06933752452815363 + }, + "mm|arc_challenge|0": { + "sem": 0.5748031496062992, + "sem_stderr": 0.025360790748556062 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7293064876957495, + "math_pass@1:1_samples_stderr": 
0.02103906971531155 + }, + "mm|arc_easy|0": { + "sem": 0.6219640971488912, + "sem_stderr": 0.015765349881245096 + }, + "all": { + "sem": 0.5197884025978885, + "sem_stderr": 0.02825358459970454, + "math_pass@1:1_samples": 0.7396532438478747, + "math_pass@1:1_samples_stderr": 0.04518829712173259 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_linear_7/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_linear_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..d92069dbc7703b5f167c2cf5263f6ca9b4438847 --- /dev/null +++ b/merge_bench/results/._merged1_phi_linear_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.515625, + "sem_stderr": 0.027980952958187033 + }, + "mm|truthfulqa|0": { + "sem": 0.3305785123966942, + "sem_stderr": 0.04294340845212094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.775, + "math_pass@1:1_samples_stderr": 0.06686668711812967 + }, + "mm|arc_challenge|0": { + "sem": 0.6220472440944882, + "sem_stderr": 0.024873599945204095 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7315436241610739, + "math_pass@1:1_samples_stderr": 0.02098406132863331 + }, + "mm|arc_easy|0": { + "sem": 0.6356916578669483, + "sem_stderr": 0.015646321359268533 + }, + "all": { + "sem": 0.5259856035895327, + "sem_stderr": 0.02786107067869515, + "math_pass@1:1_samples": 0.753271812080537, + "math_pass@1:1_samples_stderr": 0.04392537422338149 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_linear_9/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_linear_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..5740221e269ea7de49f7e27ead87440ad6344cde --- /dev/null +++ b/merge_bench/results/._merged1_phi_linear_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.5375, + "sem_stderr": 0.027915779630006632 + }, + "mm|truthfulqa|0": { + "sem": 0.32231404958677684, + "sem_stderr": 0.04266416363352168 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.5905511811023622, + "sem_stderr": 0.025225325807763342 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.756152125279642, + "math_pass@1:1_samples_stderr": 0.020332774615767827 + }, + "mm|arc_easy|0": { + "sem": 0.6272439281942978, + "sem_stderr": 0.015721175937976627 + }, + "all": { + "sem": 0.5194022897208592, + "sem_stderr": 0.027881611252317074, + "math_pass@1:1_samples": 0.803076062639821, + 
"math_pass@1:1_samples_stderr": 0.03875498105272719 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_ties_1/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_ties_1/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..50a361db855bb58727a9801dfa76d689ef4f7a47 --- /dev/null +++ b/merge_bench/results/._merged1_phi_ties_1/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.484375, + "sem_stderr": 0.027980952958187033 + }, + "mm|truthfulqa|0": { + "sem": 0.34710743801652894, + "sem_stderr": 0.04345724570292535 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203486 + }, + "mm|arc_challenge|0": { + "sem": 0.6010498687664042, + "sem_stderr": 0.025120180816809223 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7539149888143176, + "math_pass@1:1_samples_stderr": 0.020395593169900115 + }, + "mm|arc_easy|0": { + "sem": 0.6325237592397043, + "sem_stderr": 0.015674997706686407 + }, + "all": { + "sem": 0.5162640165056593, + "sem_stderr": 0.028058344296152005, + "math_pass@1:1_samples": 0.7769574944071589, + "math_pass@1:1_samples_stderr": 0.042223427345967485 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_ties_3/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_ties_3/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..4105e5bf37d73e4bc5db019c23bbb4564c6654b8 --- /dev/null +++ b/merge_bench/results/._merged1_phi_ties_3/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.5, + "sem_stderr": 0.027994625547792713 + }, + "mm|truthfulqa|0": { + "sem": 0.3140495867768595, + "sem_stderr": 0.04236964753041018 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.75, + "math_pass@1:1_samples_stderr": 0.06933752452815363 + }, + "mm|arc_challenge|0": { + "sem": 0.6220472440944882, + "sem_stderr": 0.024873599945204095 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.727069351230425, + "math_pass@1:1_samples_stderr": 0.021093402669316735 + }, + "mm|arc_easy|0": { + "sem": 0.6240760295670539, + "sem_stderr": 0.015747919721903637 + }, + "all": { + "sem": 0.5150432151096004, + "sem_stderr": 0.027746448186327655, + "math_pass@1:1_samples": 0.7385346756152125, + "math_pass@1:1_samples_stderr": 0.045215463598735184 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + 
"mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_ties_5/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_ties_5/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..26b994d6b9c785f5b8484366741327e9dfea6ba5 --- /dev/null +++ b/merge_bench/results/._merged1_phi_ties_5/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.490625, + "sem_stderr": 0.02798970418494101 + }, + "mm|truthfulqa|0": { + "sem": 0.32231404958677684, + "sem_stderr": 0.04266416363352168 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444759 + }, + "mm|arc_challenge|0": { + "sem": 0.5826771653543307, + "sem_stderr": 0.02529637410719136 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7606263982102909, + "math_pass@1:1_samples_stderr": 0.02020488555645929 + }, + "mm|arc_easy|0": { + "sem": 0.6409714889123548, + "sem_stderr": 0.015596898893325599 + }, + "all": { + "sem": 0.5091469259633656, + "sem_stderr": 0.027886785204744913, + "math_pass@1:1_samples": 0.7928131991051455, + "math_pass@1:1_samples_stderr": 0.04052415820045344 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._merged1_phi_ties_7/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_ties_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..9d054b972cc9bff7a780c1f1d9b2175bdc15da18 --- /dev/null +++ b/merge_bench/results/._merged1_phi_ties_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.5125, + "sem_stderr": 0.02798587585995666 + }, + "mm|truthfulqa|0": { + "sem": 0.2975206611570248, + "sem_stderr": 0.04173349148083498 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444758 + }, + "mm|arc_challenge|0": { + "sem": 0.6351706036745407, + "sem_stderr": 0.02469439036567337 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7360178970917226, + "math_pass@1:1_samples_stderr": 0.020871996830370478 + }, + "mm|arc_easy|0": { + "sem": 0.6409714889123548, + "sem_stderr": 0.015596898893325599 + }, + "all": { + "sem": 0.5215406884359801, + "sem_stderr": 0.027502664149947655, + "math_pass@1:1_samples": 0.7805089485458613, + "math_pass@1:1_samples_stderr": 0.04085771383740903 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git 
a/merge_bench/results/._merged1_phi_ties_9/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._merged1_phi_ties_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..4d508834fc939c2a9f3357506ec789a4194a9527 --- /dev/null +++ b/merge_bench/results/._merged1_phi_ties_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.478125, + "sem_stderr": 0.02796782098376513 + }, + "mm|truthfulqa|0": { + "sem": 0.30578512396694213, + "sem_stderr": 0.04205953933884123 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.9, + "math_pass@1:1_samples_stderr": 0.04803844614152612 + }, + "mm|arc_challenge|0": { + "sem": 0.6351706036745407, + "sem_stderr": 0.02469439036567337 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7606263982102909, + "math_pass@1:1_samples_stderr": 0.020204885556459294 + }, + "mm|arc_easy|0": { + "sem": 0.617740232312566, + "sem_stderr": 0.015799257271560774 + }, + "all": { + "sem": 0.5092052399885122, + "sem_stderr": 0.027630251989960126, + "math_pass@1:1_samples": 0.8303131991051454, + "math_pass@1:1_samples_stderr": 0.034121665848992705 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json b/merge_bench/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json new file mode 100644 index 0000000000000000000000000000000000000000..12c6ee8ce1ce5fd2328bd671618ce6afc8a48e1f --- /dev/null +++ b/merge_bench/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.834375, + "sem_stderr": 0.020813649923046133 + }, + "mm|arc_easy|0": { + "sem": 0.9767687434002112, + "sem_stderr": 0.0048976377483802 + }, + "mm|truthfulqa|0": { + "sem": 0.768595041322314, + "sem_stderr": 0.03849856098794091 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9351230425055929, + "math_pass@1:1_samples_stderr": 0.011663051339533824 + }, + "mm|arc_challenge|0": { + "sem": 0.9501312335958005, + "sem_stderr": 0.011166429074111667 + }, + "all": { + "math_pass@1:1_samples": 0.9675615212527964, + "math_pass@1:1_samples_stderr": 0.005831525669766912, + "sem": 0.8824675045795813, + "sem_stderr": 0.018844069433369727 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|math_500|0": 40, + "mm|commonsenseqa|0": 320, + "mm|arc_easy|0": 947, + "mm|truthfulqa|0": 121, + "mm|gsm8k|0": 447, + "mm|arc_challenge|0": 381 + } +} \ No newline at end of file diff --git a/merge_bench/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json b/merge_bench/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..2e3694c943e1ba2789daa020fb05032bf580b749 --- /dev/null +++ 
b/merge_bench/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.88125, + "sem_stderr": 0.018112192805211768 + }, + "mm|truthfulqa|0": { + "sem": 0.8429752066115702, + "sem_stderr": 0.03321244842547129 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|arc_challenge|0": { + "sem": 0.952755905511811, + "sem_stderr": 0.010883605491044059 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9485458612975392, + "math_pass@1:1_samples_stderr": 0.010460968487095353 + }, + "mm|arc_easy|0": { + "sem": 0.9767687434002112, + "sem_stderr": 0.00489763774838021 + }, + "all": { + "sem": 0.9134374638808982, + "sem_stderr": 0.016776471117526833, + "math_pass@1:1_samples": 0.9742729306487696, + "math_pass@1:1_samples_stderr": 0.0052304842435476765 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/logs/phi_darelinear_1.log b/merge_bench1/logs/phi_darelinear_1.log new file mode 100644 index 0000000000000000000000000000000000000000..79ef1637fcd1a0ea69247836176b7b397bc09b35 --- /dev/null +++ b/merge_bench1/logs/phi_darelinear_1.log @@ -0,0 +1,96 @@ +INFO 06-29 00:49:16 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 00:49:18 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 00:49:25 [config.py:717] This model supports multiple tasks: {'reward', 'score', 'generate', 'classify', 'embed'}. Defaulting to 'generate'. +INFO 06-29 00:49:25 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 00:49:25 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 00:49:27 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 00:49:27 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 00:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_46da9732'), local_subscribe_addr='ipc:///tmp/64b94e52-9e83-4727-87a4-7a2baa856ed5', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 00:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b58d970a'), local_subscribe_addr='ipc:///tmp/d331ac90-1cf6-4484-91a2-bf8ac890627b', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 00:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 00:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 00:49:27 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d922f50d'), local_subscribe_addr='ipc:///tmp/20b4d8e8-3e0d-464a-a7a3-4ab8619e229e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_bac26178'), local_subscribe_addr='ipc:///tmp/08198347-e1b9-47a6-a6a8-03434b4ed7ae', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2c90b296'), local_subscribe_addr='ipc:///tmp/7b7ebafe-4411-4d37-88e0-59b1d490bbb2', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:34 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:34 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3644820) WARNING 06-29 00:49:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3644821) WARNING 06-29 00:49:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3644819) WARNING 06-29 00:49:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3644818) WARNING 06-29 00:49:35 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:35 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_07eb3cbf'), local_subscribe_addr='ipc:///tmp/58645320-aa5f-44cc-9018-56c432746219', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:35 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:35 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:35 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:35 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3644820) WARNING 06-29 00:49:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3644821) WARNING 06-29 00:49:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:35 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3644818) WARNING 06-29 00:49:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3644819) WARNING 06-29 00:49:35 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:35 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:36 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:36 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:36 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:36 [loader.py:458] Loading weights took 0.74 seconds
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.863439 seconds
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.886672 seconds
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.914179 seconds
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:36 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.964450 seconds
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:43 [backends.py:430] Dynamo bytecode transform time: 6.13 s
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:43 [backends.py:430] Dynamo bytecode transform time: 6.13 s
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:43 [backends.py:430] Dynamo bytecode transform time: 6.13 s
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:43 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:43 [backends.py:430] Dynamo bytecode transform time: 6.14 s
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.414 s
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.435 s
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.454 s
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:48 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.433 s
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:49:54 [monitor.py:33] torch.compile takes 6.13 s in total
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:49:54 [monitor.py:33] torch.compile takes 6.13 s in total
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:49:54 [monitor.py:33] torch.compile takes 6.14 s in total
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:49:54 [monitor.py:33] torch.compile takes 6.13 s in total
+INFO 06-29 00:49:55 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-29 00:49:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-29 00:49:55 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 00:49:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 00:49:55 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 00:49:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 00:49:55 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-29 00:49:55 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3644820) INFO 06-29 00:50:21 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3644821) INFO 06-29 00:50:21 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3644818) INFO 06-29 00:50:21 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3644819) INFO 06-29 00:50:21 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-29 00:50:21 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.50 seconds
+INFO 06-29 00:50:21 [core_client.py:439] Core engine process 0 ready.
+INFO 06-29 01:03:04 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-29 01:03:04 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5254|± |0.0281|
+| | |math_pass@1:1_samples|0.7352|± |0.0453|
+|mm\|arc_challenge\|0| 0|sem |0.5853|± |0.0253|
+|mm\|arc_easy\|0 | 0|sem |0.6410|± |0.0156|
+|mm\|commonsenseqa\|0| 0|sem |0.5281|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7204|± |0.0213|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7500|± |0.0693|
+|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435|
+
diff --git a/merge_bench1/logs/phi_darelinear_3.log b/merge_bench1/logs/phi_darelinear_3.log
new file mode 100644
index 0000000000000000000000000000000000000000..d64760c55729e66cb841a478a9dedc108cfce13c
--- /dev/null
+++ b/merge_bench1/logs/phi_darelinear_3.log
@@ -0,0 +1,96 @@
+INFO 06-29 01:03:03 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-29 01:03:05 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-29 01:03:12 [config.py:717] This model supports multiple tasks: {'embed', 'generate', 'classify', 'score', 'reward'}. Defaulting to 'generate'.
+INFO 06-29 01:03:12 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-29 01:03:12 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-29 01:03:14 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-29 01:03:14 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-29 01:03:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_ec927819'), local_subscribe_addr='ipc:///tmp/23745c04-eb75-425a-9247-160aa07318d7', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:03:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d83872c3'), local_subscribe_addr='ipc:///tmp/398bbe1e-7efd-4a87-ab91-073edcfb664a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:03:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4515ff74'), local_subscribe_addr='ipc:///tmp/0f5f917e-5575-49f2-97cd-0b5a53d09ce5', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:03:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-29 01:03:14 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6f7051b9'), local_subscribe_addr='ipc:///tmp/a6b1bcf7-fd8e-43f1-a2c7-bf9778e3df12', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:14 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f0edff5e'), local_subscribe_addr='ipc:///tmp/1f7ea330-a6d1-4adf-929f-494b31a181a2', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:16 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:16 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:16 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:16 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:16 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:16 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:16 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:16 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3647451) WARNING 06-29 01:03:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3647450) WARNING 06-29 01:03:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3647449) WARNING 06-29 01:03:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3647448) WARNING 06-29 01:03:17 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:17 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_c874450a'), local_subscribe_addr='ipc:///tmp/2419135e-cb8e-4e7d-97c0-3093cb89c2cc', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:17 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:17 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:17 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:17 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3647450) WARNING 06-29 01:03:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3647451) WARNING 06-29 01:03:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:17 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3647448) WARNING 06-29 01:03:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3647449) WARNING 06-29 01:03:17 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:17 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:18 [loader.py:458] Loading weights took 0.65 seconds
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:18 [loader.py:458] Loading weights took 0.66 seconds
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:18 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:18 [loader.py:458] Loading weights took 0.72 seconds
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.838996 seconds
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.846890 seconds
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.949377 seconds
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:18 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.906862 seconds
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:24 [backends.py:430] Dynamo bytecode transform time: 5.58 s
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:24 [backends.py:430] Dynamo bytecode transform time: 5.77 s
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:24 [backends.py:430] Dynamo bytecode transform time: 5.83 s
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:24 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:24 [backends.py:430] Dynamo bytecode transform time: 5.93 s
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:29 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.395 s
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:29 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.420 s
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:29 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.425 s
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:29 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.498 s
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:03:35 [monitor.py:33] torch.compile takes 5.83 s in total
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:03:35 [monitor.py:33] torch.compile takes 5.58 s in total
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:03:35 [monitor.py:33] torch.compile takes 5.77 s in total
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:03:35 [monitor.py:33] torch.compile takes 5.93 s in total
+INFO 06-29 01:03:36 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-29 01:03:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-29 01:03:36 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 01:03:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 01:03:36 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 01:03:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 01:03:36 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-29 01:03:36 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3647450) INFO 06-29 01:04:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3647448) INFO 06-29 01:04:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3647451) INFO 06-29 01:04:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3647449) INFO 06-29 01:04:03 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-29 01:04:03 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.56 seconds
+INFO 06-29 01:04:03 [core_client.py:439] Core engine process 0 ready.
+INFO 06-29 01:16:38 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-29 01:16:38 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5248|± |0.0283|
+| | |math_pass@1:1_samples|0.7872|± |0.0407|
+|mm\|arc_challenge\|0| 0|sem |0.5853|± |0.0253|
+|mm\|arc_easy\|0 | 0|sem |0.6399|± |0.0156|
+|mm\|commonsenseqa\|0| 0|sem |0.4938|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7494|± |0.0205|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608|
+|mm\|truthfulqa\|0 | 0|sem |0.3802|± |0.0443|
+
diff --git a/merge_bench1/logs/phi_darelinear_5.log b/merge_bench1/logs/phi_darelinear_5.log
new file mode 100644
index 0000000000000000000000000000000000000000..4aff265bb57047afab270e6dc6c42dbbfd334e71
--- /dev/null
+++ b/merge_bench1/logs/phi_darelinear_5.log
@@ -0,0 +1,96 @@
+INFO 06-29 01:16:37 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-29 01:16:39 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-29 01:16:46 [config.py:717] This model supports multiple tasks: {'classify', 'embed', 'score', 'generate', 'reward'}. Defaulting to 'generate'.
+INFO 06-29 01:16:46 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-29 01:16:46 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-29 01:16:47 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-29 01:16:47 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-29 01:16:47 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_1fdcd836'), local_subscribe_addr='ipc:///tmp/09327ae9-5e74-46f6-9a9e-8cb094ce2dca', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:16:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_57cc4ffa'), local_subscribe_addr='ipc:///tmp/b527a988-7155-498c-b9d9-1950c576cdda', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:16:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cced6c5a'), local_subscribe_addr='ipc:///tmp/51d0f6d8-ad13-44fa-9ffc-689043b0c1d0', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:16:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-29 01:16:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0a80710c'), local_subscribe_addr='ipc:///tmp/0b6ee7e2-751e-402e-960f-1fa1fdf30c45', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3ef19686'), local_subscribe_addr='ipc:///tmp/eadf4aff-a254-4032-b630-86e219535cdc', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:50 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:50 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3651240) WARNING 06-29 01:16:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3651241) WARNING 06-29 01:16:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3651243) WARNING 06-29 01:16:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3651242) WARNING 06-29 01:16:51 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:51 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_5b44e305'), local_subscribe_addr='ipc:///tmp/3359d4f7-42c3-4434-a37e-67b71d7c991c', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:51 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:51 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=3651242) WARNING 06-29 01:16:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3651241) WARNING 06-29 01:16:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:51 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3651240) WARNING 06-29 01:16:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:51 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:51 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3651243) WARNING 06-29 01:16:51 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:51 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:52 [loader.py:458] Loading weights took 0.71 seconds
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:52 [loader.py:458] Loading weights took 0.71 seconds
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:52 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:52 [loader.py:458] Loading weights took 0.79 seconds
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.902135 seconds
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.900187 seconds
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.996576 seconds
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:52 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.922902 seconds
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:16:58 [backends.py:430] Dynamo bytecode transform time: 5.55 s
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:16:58 [backends.py:430] Dynamo bytecode transform time: 5.64 s
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:16:58 [backends.py:430] Dynamo bytecode transform time: 5.68 s
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:58 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:16:58 [backends.py:430] Dynamo bytecode transform time: 5.78 s
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:17:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.387 s
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:17:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.400 s
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:17:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.384 s
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:17:03 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.413 s
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:17:09 [monitor.py:33] torch.compile takes 5.64 s in total
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:17:09 [monitor.py:33] torch.compile takes 5.68 s in total
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:17:09 [monitor.py:33] torch.compile takes 5.78 s in total
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:17:09 [monitor.py:33] torch.compile takes 5.55 s in total
+INFO 06-29 01:17:10 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-29 01:17:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-29 01:17:10 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 01:17:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 01:17:10 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 01:17:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 01:17:10 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-29 01:17:10 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=0 pid=3651240) INFO 06-29 01:17:36 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3651243) INFO 06-29 01:17:36 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3651241) INFO 06-29 01:17:36 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3651242) INFO 06-29 01:17:36 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-29 01:17:36 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.80 seconds
+INFO 06-29 01:17:36 [core_client.py:439] Core engine process 0 ready.
+INFO 06-29 01:30:15 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-29 01:30:15 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5206|± |0.0279|
+| | |math_pass@1:1_samples|0.7658|± |0.0425|
+|mm\|arc_challenge\|0| 0|sem |0.6089|± |0.0250|
+|mm\|arc_easy\|0 | 0|sem |0.6167|± |0.0158|
+|mm\|commonsenseqa\|0| 0|sem |0.5344|± |0.0279|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7315|± |0.0210|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641|
+|mm\|truthfulqa\|0 | 0|sem |0.3223|± |0.0427|
+
diff --git a/merge_bench1/logs/phi_darelinear_7.log b/merge_bench1/logs/phi_darelinear_7.log
new file mode 100644
index 0000000000000000000000000000000000000000..ed88689a952399caa9ad2d8e5b41e45ed32ca0a4
--- /dev/null
+++ b/merge_bench1/logs/phi_darelinear_7.log
@@ -0,0 +1,96 @@
+INFO 06-29 01:30:14 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-29 01:30:15 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-29 01:30:22 [config.py:717] This model supports multiple tasks: {'classify', 'generate', 'reward', 'score', 'embed'}. Defaulting to 'generate'.
+INFO 06-29 01:30:22 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-29 01:30:22 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-29 01:30:24 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-29 01:30:24 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-29 01:30:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_d6fc66d5'), local_subscribe_addr='ipc:///tmp/5c096289-9dd0-451a-bcb0-bf9007242568', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:30:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_c4c0c2e8'), local_subscribe_addr='ipc:///tmp/09939f12-d348-4a14-a043-11ae0cbb1f54', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:30:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b5e598d2'), local_subscribe_addr='ipc:///tmp/c9e08431-9f4f-4905-81df-b8caa6443e52', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 01:30:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-29 01:30:24 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b0d68ab1'), local_subscribe_addr='ipc:///tmp/a09b3d99-6718-4c90-a28f-9e2ed157f57f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:24 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a33594bf'), local_subscribe_addr='ipc:///tmp/65ea2efb-6aab-4547-9e77-505e500910c7', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:36 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:36 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:36 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:36 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:36 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:36 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:36 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:36 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3653525) WARNING 06-29 01:30:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=3 pid=3653526) WARNING 06-29 01:30:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3653523) WARNING 06-29 01:30:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3653524) WARNING 06-29 01:30:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_67ea4f74'), local_subscribe_addr='ipc:///tmp/acfcdf53-86fe-458f-8fab-a5c926bbcac7', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:37 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:37 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:37 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:37 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:37 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:37 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3653526) WARNING 06-29 01:30:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:37 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3653525) WARNING 06-29 01:30:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3653523) WARNING 06-29 01:30:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:37 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3653524) WARNING 06-29 01:30:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:38 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:38 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:38 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:38 [loader.py:458] Loading weights took 0.75 seconds
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.864630 seconds
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.864921 seconds
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.929355 seconds
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.967108 seconds
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:44 [backends.py:430] Dynamo bytecode transform time: 5.65 s
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:44 [backends.py:430] Dynamo bytecode transform time: 5.69 s
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:44 [backends.py:430] Dynamo bytecode transform time: 5.76 s
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:44 [backends.py:430] Dynamo bytecode transform time: 5.97 s
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.383 s
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.458 s
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.492 s
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.397 s
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:30:55 [monitor.py:33] torch.compile takes 5.69 s in total
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:30:55 [monitor.py:33] torch.compile takes 5.97 s in total
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:30:55 [monitor.py:33] torch.compile takes 5.65 s in total
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:30:55 [monitor.py:33] torch.compile takes 5.76 s in total
+INFO 06-29 01:30:56 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-29 01:30:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-29 01:30:56 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 01:30:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 01:30:56 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 01:30:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 01:30:56 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-29 01:30:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=1 pid=3653524) INFO 06-29 01:31:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3653525) INFO 06-29 01:31:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3653526) INFO 06-29 01:31:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3653523) INFO 06-29 01:31:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB
+INFO 06-29 01:31:22 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.88 seconds
+INFO 06-29 01:31:22 [core_client.py:439] Core engine process 0 ready.
+INFO 06-29 01:44:05 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-29 01:44:05 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5177|± |0.0275|
+| | |math_pass@1:1_samples|0.7452|± |0.0450|
+|mm\|arc_challenge\|0| 0|sem |0.6273|± |0.0248|
+|mm\|arc_easy\|0 | 0|sem |0.6230|± |0.0158|
+|mm\|commonsenseqa\|0| 0|sem |0.5312|± |0.0279|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7405|± |0.0208|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7500|± |0.0693|
+|mm\|truthfulqa\|0 | 0|sem |0.2893|± |0.0414|
+
diff --git a/merge_bench1/logs/phi_darelinear_9.log b/merge_bench1/logs/phi_darelinear_9.log
new file mode 100644
index 0000000000000000000000000000000000000000..f4618d4f9743fc50c984b706b13012e72925be1d
--- /dev/null
+++ b/merge_bench1/logs/phi_darelinear_9.log
@@ -0,0 +1,96 @@
+INFO 06-29 01:44:04 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-29 01:44:06 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-29 01:44:13 [config.py:717] This model supports multiple tasks: {'generate', 'reward', 'embed', 'score', 'classify'}. Defaulting to 'generate'.
+INFO 06-29 01:44:13 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-29 01:44:13 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-29 01:44:15 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 01:44:15 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 01:44:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_6bec245e'), local_subscribe_addr='ipc:///tmp/ac4490f6-414a-4630-bc70-1533c72283a9', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 01:44:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2a53d671'), local_subscribe_addr='ipc:///tmp/e9158a62-66b7-4939-b705-663bfc11f666', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 01:44:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_efdbda6f'), local_subscribe_addr='ipc:///tmp/c769c3a9-4d79-4ef8-8ca2-13bde4fc970e', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 01:44:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 01:44:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d762004e'), local_subscribe_addr='ipc:///tmp/130d45dd-1b2e-406f-bda3-9763fcc099e0', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_66dfedbc'), local_subscribe_addr='ipc:///tmp/415bfff0-1da9-4138-9710-0d4d6c2ad4f8', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:17 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:17 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3655494) WARNING 06-29 01:44:18 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3655495) WARNING 06-29 01:44:18 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=3 pid=3655498) WARNING 06-29 01:44:18 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3655497) WARNING 06-29 01:44:18 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:18 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_7eea22f8'), local_subscribe_addr='ipc:///tmp/00b9b1ab-dfe4-428c-8802-7447c793212a', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:18 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:18 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:18 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:18 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:18 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:18 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3655495) WARNING 06-29 01:44:18 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3655494) WARNING 06-29 01:44:18 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:18 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:18 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3655497) WARNING 06-29 01:44:18 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3655498) WARNING 06-29 01:44:18 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:18 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:18 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:18 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:18 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:18 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:18 [loader.py:458] Loading weights took 0.66 seconds +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:19 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:19 [loader.py:458] Loading weights took 0.77 seconds +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.847058 seconds +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.855347 seconds +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.923545 seconds +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:19 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.985661 seconds +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:25 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:25 [backends.py:430] Dynamo bytecode transform time: 5.80 s +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:25 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:25 [backends.py:430] Dynamo bytecode transform time: 5.81 s +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:25 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:25 [backends.py:430] Dynamo bytecode transform time: 5.87 s +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:25 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:25 [backends.py:430] Dynamo bytecode transform time: 5.90 s +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:30 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.458 s +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:30 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.467 s +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:30 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.504 s +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:30 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.516 s +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:44:36 [monitor.py:33] torch.compile takes 5.80 s in total +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:44:36 [monitor.py:33] torch.compile takes 5.81 s in total +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:44:36 [monitor.py:33] torch.compile takes 5.87 s in total +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:44:36 [monitor.py:33] torch.compile takes 5.90 s in total +INFO 06-29 01:44:37 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 01:44:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 01:44:37 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 01:44:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 01:44:37 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 01:44:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 01:44:37 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 01:44:37 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3655498) INFO 06-29 01:45:00 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3655495) INFO 06-29 01:45:00 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3655494) INFO 06-29 01:45:00 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3655497) INFO 06-29 01:45:00 [gpu_model_runner.py:1686] Graph capturing finished in 23 secs, took 2.96 GiB +INFO 06-29 01:45:00 [core.py:159] init engine (profile, create kv cache, warmup model) took 41.37 seconds +INFO 06-29 01:45:01 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 01:57:38 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 01:57:38 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5314|± |0.0281| +| | |math_pass@1:1_samples|0.7952|± |0.0390| +|mm\|arc_challenge\|0| 0|sem |0.6273|± |0.0248| +|mm\|arc_easy\|0 | 0|sem |0.6241|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5188|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7405|± |0.0208| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572| +|mm\|truthfulqa\|0 | 0|sem |0.3554|± |0.0437| + diff --git a/merge_bench1/logs/phi_linear_1.log b/merge_bench1/logs/phi_linear_1.log new file mode 100644 index 0000000000000000000000000000000000000000..ce939ccfb5ce97962bd8d680a76541d5cfea1027 --- /dev/null +++ b/merge_bench1/logs/phi_linear_1.log @@ -0,0 +1,96 @@ +INFO 06-29 01:57:37 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 01:57:39 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 01:57:46 [config.py:717] This model supports multiple tasks: {'classify', 'embed', 'reward', 'score', 'generate'}. Defaulting to 'generate'. +INFO 06-29 01:57:46 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 01:57:46 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
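Each of these logs ends with a lighteval-style markdown table like the one above. A hypothetical helper (not part of the repo) for pulling those numbers out when comparing runs across `merge_bench/` and `merge_bench1/`; it assumes you read the checked-in log files themselves (no leading `+` from a diff view) and it unescapes the `\|` in task names like `mm\|gsm8k\|0`:

```
import re

def parse_results(log_text: str) -> dict[tuple[str, str], float]:
    """Map (task, metric) -> value from the results table at the end of a log."""
    results: dict[tuple[str, str], float] = {}
    for line in log_text.splitlines():
        line = line.strip()
        # Skip non-table lines, the header row, and the |---| separator row.
        if not line.startswith("|") or line.startswith("|---") or "Metric" in line:
            continue
        # Split on '|' but not on the escaped '\|' inside task names.
        cells = [c.strip().replace("\\|", "|") for c in re.split(r"(?<!\\)\|", line)]
        # Expected cells: ['', task, version, metric, value, '±', stderr, ''].
        # The two aggregate rows come out keyed as ('all', 'sem') and ('', 'math_pass@1:1_samples').
        if len(cells) >= 5:
            try:
                results[(cells[1], cells[3])] = float(cells[4])
            except ValueError:
                pass  # stray row without a numeric value column
    return results
```

Usage would be along the lines of `parse_results(open("merge_bench1/logs/phi_linear_1.log").read())`, then diffing the dicts from two runs.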
+INFO 06-29 01:57:48 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 01:57:48 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
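The warning just above is actionable: vLLM clamps Torch to one CPU thread unless `OMP_NUM_THREADS` is already set in the environment. A minimal sketch, assuming the benchmarks are driven from Python; the thread count here is an illustrative guess for this 128-thread host, not a measured optimum:

```
import os

# Must be set before torch/vllm are imported, otherwise vLLM's clamp applies.
# 8 threads per process is an assumption; tune for your machine.
os.environ.setdefault("OMP_NUM_THREADS", "8")

from vllm import LLM  # safe to import now that the env var is in place
```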
+INFO 06-29 01:57:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_cc8f1498'), local_subscribe_addr='ipc:///tmp/5964ae1a-d018-444a-be76-77eaf5bf43a4', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 01:57:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 01:57:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7f523742'), local_subscribe_addr='ipc:///tmp/a5efc4b0-ddab-476c-bef5-b8d0e1a716b8', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 01:57:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 01:57:48 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1036e5e4'), local_subscribe_addr='ipc:///tmp/a9befbca-2f62-47fa-bd9b-c7f9d4afe1c2', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d3bcc908'), local_subscribe_addr='ipc:///tmp/498e4ce8-cb49-42bc-8d53-f621b8284b11', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_45dad878'), local_subscribe_addr='ipc:///tmp/cf7b484e-18ca-4272-9ebd-926f57866219', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:55 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:55 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:55 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:55 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:55 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:55 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:55 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:55 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3657464) WARNING 06-29 01:57:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3657465) WARNING 06-29 01:57:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3657462) WARNING 06-29 01:57:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3657463) WARNING 06-29 01:57:56 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:56 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_a7ac8a61'), local_subscribe_addr='ipc:///tmp/3c5c1589-2e76-4898-989a-39bd14e0430e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:56 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:56 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:56 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:56 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3657465) WARNING 06-29 01:57:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3657464) WARNING 06-29 01:57:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:56 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3657462) WARNING 06-29 01:57:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3657463) WARNING 06-29 01:57:56 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:56 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
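For reference, the engine settings in these logs (tensor_parallel_size=4, max_seq_len=2048, bfloat16) map onto vLLM's `LLM` constructor roughly as below. This is a sketch, not the repo's actual eval harness; `disable_custom_all_reduce=True` is included only to silence the PCIe-only-GPU warning above, and the sampler warning goes away once FlashInfer is installed (see vLLM's docs for the matching wheel).

```
from vllm import LLM, SamplingParams

# Mirrors the logged engine config; illustrative only.
llm = LLM(
    model="./models/R-Phi4",
    tensor_parallel_size=4,          # matches tensor_parallel_size=4 in the log
    max_model_len=2048,              # matches max_seq_len=2048
    dtype="bfloat16",
    disable_custom_all_reduce=True,  # silences the custom-allreduce warning
)

# Quick smoke test that the merged checkpoint loads and generates.
out = llm.generate(["2+2="], SamplingParams(max_tokens=16))
print(out[0].outputs[0].text)
```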
+(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:57 [loader.py:458] Loading weights took 0.74 seconds +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:57 [loader.py:458] Loading weights took 0.73 seconds +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:57 [loader.py:458] Loading weights took 0.74 seconds +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:57 [loader.py:458] Loading weights took 0.81 seconds +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:57:57 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.937416 seconds +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:57:57 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.940440 seconds +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:57:57 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 1.060061 seconds +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:57:57 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.987784 seconds +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:58:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:58:03 [backends.py:430] Dynamo bytecode transform time: 5.56 s +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:58:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:58:03 [backends.py:430] Dynamo bytecode transform time: 5.63 s +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:58:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:58:03 [backends.py:430] Dynamo bytecode transform time: 5.69 s +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:58:03 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:58:03 [backends.py:430] Dynamo bytecode transform time: 5.90 s +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:58:08 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.428 s +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:58:08 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.374 s +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:58:08 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.440 s +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:58:08 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.442 s +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:58:14 [monitor.py:33] torch.compile takes 5.56 s in total +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:58:14 [monitor.py:33] torch.compile takes 5.69 s in total +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:58:14 [monitor.py:33] torch.compile takes 5.63 s in total +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:58:14 [monitor.py:33] torch.compile takes 5.90 s in total +INFO 06-29 01:58:15 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 01:58:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 01:58:15 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 01:58:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 01:58:15 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 01:58:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 01:58:15 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 01:58:15 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3657465) INFO 06-29 01:58:41 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3657463) INFO 06-29 01:58:41 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3657464) INFO 06-29 01:58:41 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3657462) INFO 06-29 01:58:41 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +INFO 06-29 01:58:41 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.44 seconds +INFO 06-29 01:58:41 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 02:11:25 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 02:11:25 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5082|± |0.0280| +| | |math_pass@1:1_samples|0.7986|± |0.0389| +|mm\|arc_challenge\|0| 0|sem |0.5696|± |0.0254| +|mm\|arc_easy\|0 | 0|sem |0.6463|± |0.0155| +|mm\|commonsenseqa\|0| 0|sem |0.4781|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7472|± |0.0206| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572| +|mm\|truthfulqa\|0 | 0|sem |0.3388|± |0.0432| + diff --git a/merge_bench1/logs/phi_linear_3.log b/merge_bench1/logs/phi_linear_3.log new file mode 100644 index 0000000000000000000000000000000000000000..6dc1b6ff63e275a635e06441d05f47b7ecf10906 --- /dev/null +++ b/merge_bench1/logs/phi_linear_3.log @@ -0,0 +1,96 @@ +INFO 06-29 02:11:23 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 02:11:25 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 02:11:32 [config.py:717] This model supports multiple tasks: {'embed', 'reward', 'generate', 'classify', 'score'}. Defaulting to 'generate'. +INFO 06-29 02:11:32 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 02:11:32 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 02:11:34 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 02:11:34 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 02:11:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_1358414b'), local_subscribe_addr='ipc:///tmp/4d5c4986-492b-474f-b89d-2a5d4955e5af', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:11:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_59fe3cf7'), local_subscribe_addr='ipc:///tmp/b8d313f1-0164-47ea-a4e9-4961c19287f4', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:11:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6d608938'), local_subscribe_addr='ipc:///tmp/3dcb43a5-d53a-4cc7-8c70-eeccce53f2de', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:11:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 02:11:34 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_146b65aa'), local_subscribe_addr='ipc:///tmp/3373959a-1f51-4547-bad9-a3c7bbdb1f92', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:34 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_15060f1b'), local_subscribe_addr='ipc:///tmp/d512723d-12ea-405c-94f5-b671a7bb9065', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:36 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:36 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3659437) WARNING 06-29 02:11:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3659436) WARNING 06-29 02:11:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3659435) WARNING 06-29 02:11:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3659434) WARNING 06-29 02:11:37 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:37 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_c1b5d7db'), local_subscribe_addr='ipc:///tmp/81b62f8f-22e4-4970-a83f-30f55ac6fd53', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:37 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:37 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:37 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:37 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3659436) WARNING 06-29 02:11:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:37 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3659437) WARNING 06-29 02:11:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3659435) WARNING 06-29 02:11:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3659434) WARNING 06-29 02:11:37 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:37 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:37 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:37 [loader.py:458] Loading weights took 0.67 seconds +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:37 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:38 [loader.py:458] Loading weights took 0.74 seconds +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.855935 seconds +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.855793 seconds +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.911111 seconds +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:38 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.960708 seconds +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:44 [backends.py:430] Dynamo bytecode transform time: 5.62 s +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:44 [backends.py:430] Dynamo bytecode transform time: 5.67 s +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:44 [backends.py:430] Dynamo bytecode transform time: 5.68 s +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:44 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:44 [backends.py:430] Dynamo bytecode transform time: 5.93 s +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.432 s +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.455 s +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.451 s +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:49 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.475 s +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:11:55 [monitor.py:33] torch.compile takes 5.62 s in total +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:11:55 [monitor.py:33] torch.compile takes 5.68 s in total +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:11:55 [monitor.py:33] torch.compile takes 5.93 s in total +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:11:55 [monitor.py:33] torch.compile takes 5.67 s in total +INFO 06-29 02:11:56 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 02:11:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 02:11:56 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:11:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:11:56 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:11:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:11:56 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 02:11:56 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=0 pid=3659434) INFO 06-29 02:12:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3659436) INFO 06-29 02:12:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3659437) INFO 06-29 02:12:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3659435) INFO 06-29 02:12:22 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-29 02:12:22 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.07 seconds +INFO 06-29 02:12:23 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 02:24:58 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 02:24:58 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5255|± |0.0280| +| | |math_pass@1:1_samples|0.7908|± |0.0391| +|mm\|arc_challenge\|0| 0|sem |0.6089|± |0.0250| +|mm\|arc_easy\|0 | 0|sem |0.6262|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5281|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7315|± |0.0210| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572| +|mm\|truthfulqa\|0 | 0|sem |0.3388|± |0.0432| + diff --git a/merge_bench1/logs/phi_linear_5.log b/merge_bench1/logs/phi_linear_5.log new file mode 100644 index 0000000000000000000000000000000000000000..290eb6f9260cfd5df96f71041e2b1120891adb0f --- /dev/null +++ b/merge_bench1/logs/phi_linear_5.log @@ -0,0 +1,96 @@ +INFO 06-29 02:24:57 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 02:24:59 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 02:25:06 [config.py:717] This model supports multiple tasks: {'score', 'embed', 'reward', 'classify', 'generate'}. Defaulting to 'generate'. +INFO 06-29 02:25:06 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 02:25:06 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 02:25:07 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 02:25:07 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 02:25:07 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_c5695c62'), local_subscribe_addr='ipc:///tmp/9d8a2c41-d2f8-4a03-b966-d52c2481e947', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:25:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_cc0d0725'), local_subscribe_addr='ipc:///tmp/0ac5144d-a008-4260-a5b6-42bcf2f66eec', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:25:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 02:25:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2e19fa10'), local_subscribe_addr='ipc:///tmp/2e5e0c9c-e9a6-4d78-bd36-f06bf35913ad', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:25:08 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a0007219'), local_subscribe_addr='ipc:///tmp/230e5caa-32ec-4cf4-b830-1ea83c14a260', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:08 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_42fe154d'), local_subscribe_addr='ipc:///tmp/b6394ef2-b922-4fba-9c86-12d294eadfcb', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:09 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:09 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:09 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:10 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:10 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3661411) WARNING 06-29 02:25:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3661410) WARNING 06-29 02:25:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3661409) WARNING 06-29 02:25:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3661408) WARNING 06-29 02:25:10 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:10 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_b95835a1'), local_subscribe_addr='ipc:///tmp/47e1a283-d9d1-4d6a-b3d8-1737fac2f697', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:10 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:10 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:10 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:10 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:10 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3661410) WARNING 06-29 02:25:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3661409) WARNING 06-29 02:25:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:10 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:10 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:10 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3661408) WARNING 06-29 02:25:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3661411) WARNING 06-29 02:25:10 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:10 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:10 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:10 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:10 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:11 [loader.py:458] Loading weights took 0.66 seconds +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:11 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:11 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:11 [loader.py:458] Loading weights took 0.74 seconds +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:11 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.852984 seconds +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:11 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.884060 seconds +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:11 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.965591 seconds +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:12 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.932077 seconds +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:17 [backends.py:430] Dynamo bytecode transform time: 5.67 s +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:17 [backends.py:430] Dynamo bytecode transform time: 5.73 s +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:17 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:17 [backends.py:430] Dynamo bytecode transform time: 5.75 s +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:18 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:18 [backends.py:430] Dynamo bytecode transform time: 5.99 s +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:23 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.663 s +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:23 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.627 s +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:23 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.712 s +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:23 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.605 s +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:29 [monitor.py:33] torch.compile takes 5.73 s in total +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:29 [monitor.py:33] torch.compile takes 5.99 s in total +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:29 [monitor.py:33] torch.compile takes 5.75 s in total +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:29 [monitor.py:33] torch.compile takes 5.67 s in total +INFO 06-29 02:25:30 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 02:25:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 02:25:30 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:25:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:25:30 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:25:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:25:30 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 02:25:30 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3661411) INFO 06-29 02:25:56 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3661408) INFO 06-29 02:25:56 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3661410) INFO 06-29 02:25:56 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3661409) INFO 06-29 02:25:56 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-29 02:25:56 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.14 seconds +INFO 06-29 02:25:56 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 02:38:36 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 02:38:36 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5306|± |0.0280| +| | |math_pass@1:1_samples|0.7796|± |0.0393| +|mm\|arc_challenge\|0| 0|sem |0.6273|± |0.0248| +|mm\|arc_easy\|0 | 0|sem |0.6294|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5188|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7092|± |0.0215| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572| +|mm\|truthfulqa\|0 | 0|sem |0.3471|± |0.0435| + diff --git a/merge_bench1/logs/phi_linear_7.log b/merge_bench1/logs/phi_linear_7.log new file mode 100644 index 0000000000000000000000000000000000000000..8740a7b01a431bb886273f53a5eb58375354ee25 --- /dev/null +++ b/merge_bench1/logs/phi_linear_7.log @@ -0,0 +1,96 @@ +INFO 06-29 02:38:35 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 02:38:37 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 02:38:44 [config.py:717] This model supports multiple tasks: {'classify', 'reward', 'generate', 'embed', 'score'}. Defaulting to 'generate'. +INFO 06-29 02:38:44 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 02:38:44 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 02:38:46 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 02:38:46 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 02:38:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_c4796ff5'), local_subscribe_addr='ipc:///tmp/50ce4cdc-4e86-420f-a951-6edb5acd3067', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:38:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_599b29dc'), local_subscribe_addr='ipc:///tmp/89b285ac-4a68-4ddc-8709-fb36b59c8102', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:38:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_846dd766'), local_subscribe_addr='ipc:///tmp/df90d7ca-8cfb-4a8e-b192-54e6a56e8d45', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:38:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 02:38:46 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_9229f683'), local_subscribe_addr='ipc:///tmp/eef446eb-37cb-48e7-a41b-7cd4075c270a', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7c6d6e35'), local_subscribe_addr='ipc:///tmp/186a40d9-d658-414b-aa06-e45893a2cf50', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:48 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:48 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:48 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:48 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:48 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:48 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:48 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:48 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3663374) WARNING 06-29 02:38:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3663375) WARNING 06-29 02:38:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=0 pid=3663372) WARNING 06-29 02:38:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=1 pid=3663373) WARNING 06-29 02:38:48 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:48 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_38be11f8'), local_subscribe_addr='ipc:///tmp/3810220c-9bcf-427a-89d0-6a7ad7167336', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:48 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:48 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:48 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:48 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:48 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3663374) WARNING 06-29 02:38:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3663375) WARNING 06-29 02:38:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:48 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3663373) WARNING 06-29 02:38:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:48 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:48 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3663372) WARNING 06-29 02:38:48 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:48 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:49 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:49 [loader.py:458] Loading weights took 0.69 seconds +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:49 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:49 [loader.py:458] Loading weights took 0.77 seconds +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:50 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.861284 seconds +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:50 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.877311 seconds +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:50 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.916509 seconds +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:50 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.975399 seconds +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:55 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:38:55 [backends.py:430] Dynamo bytecode transform time: 5.54 s +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:38:56 [backends.py:430] Dynamo bytecode transform time: 5.74 s +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:38:56 [backends.py:430] Dynamo bytecode transform time: 5.80 s +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:56 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:38:56 [backends.py:430] Dynamo bytecode transform time: 5.85 s +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:39:01 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.366 s +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:39:01 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.463 s +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:39:01 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.424 s +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:39:01 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.418 s +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:39:07 [monitor.py:33] torch.compile takes 5.80 s in total +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:39:07 [monitor.py:33] torch.compile takes 5.74 s in total +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:39:07 [monitor.py:33] torch.compile takes 5.85 s in total +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:39:07 [monitor.py:33] torch.compile takes 5.54 s in total +INFO 06-29 02:39:08 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 02:39:08 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 02:39:08 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:39:08 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:39:08 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:39:08 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:39:08 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 02:39:08 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3663375) INFO 06-29 02:39:36 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3663374) INFO 06-29 02:39:36 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3663373) INFO 06-29 02:39:36 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3663372) INFO 06-29 02:39:36 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB +INFO 06-29 02:39:36 [core.py:159] init engine (profile, create kv cache, warmup model) took 45.74 seconds +INFO 06-29 02:39:36 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 02:52:18 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 02:52:18 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5292|± |0.0280| +| | |math_pass@1:1_samples|0.7501|± |0.0429| +|mm\|arc_challenge\|0| 0|sem |0.6299|± |0.0248| +|mm\|arc_easy\|0 | 0|sem |0.6283|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5031|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7002|± |0.0217| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641| +|mm\|truthfulqa\|0 | 0|sem |0.3554|± |0.0437| + diff --git a/merge_bench1/logs/phi_linear_9.log b/merge_bench1/logs/phi_linear_9.log new file mode 100644 index 0000000000000000000000000000000000000000..5d2884c755706b5194c4317dcf3e18b384e803dd --- /dev/null +++ b/merge_bench1/logs/phi_linear_9.log @@ -0,0 +1,96 @@ +INFO 06-29 02:52:17 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 02:52:19 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 02:52:26 [config.py:717] This model supports multiple tasks: {'reward', 'score', 'generate', 'classify', 'embed'}. Defaulting to 'generate'. +INFO 06-29 02:52:26 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 02:52:26 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 02:52:27 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 02:52:27 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 02:52:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_644d2295'), local_subscribe_addr='ipc:///tmp/ca8f5265-229b-42cc-86c0-20085ef721e4', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:52:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_92d03026'), local_subscribe_addr='ipc:///tmp/2fc8ad11-3022-4a60-b482-c6926b112815', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:52:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3bc49d68'), local_subscribe_addr='ipc:///tmp/ebb2046d-8f8a-4784-b197-fc9bd9966aef', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 02:52:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 02:52:28 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_db3f9dbe'), local_subscribe_addr='ipc:///tmp/a5303145-e98b-401d-9a5e-752686a85b4e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:28 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_7ab8cce9'), local_subscribe_addr='ipc:///tmp/ec441093-eae3-4ed1-a225-c319e0fc7239', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:30 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:30 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3665333) WARNING 06-29 02:52:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3665332) WARNING 06-29 02:52:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3665331) WARNING 06-29 02:52:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3665330) WARNING 06-29 02:52:30 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:30 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_2b656574'), local_subscribe_addr='ipc:///tmp/d34b71fb-1b2b-4ba9-9d8f-88e5b3f7f03d', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:30 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:30 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:30 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:30 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3665333) WARNING 06-29 02:52:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3665332) WARNING 06-29 02:52:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:30 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3665331) WARNING 06-29 02:52:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3665330) WARNING 06-29 02:52:30 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:30 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:31 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:31 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:31 [loader.py:458] Loading weights took 0.71 seconds +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:31 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.889313 seconds +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.889936 seconds +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:31 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.927022 seconds +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:32 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.983368 seconds +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:37 [backends.py:430] Dynamo bytecode transform time: 5.54 s +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:37 [backends.py:430] Dynamo bytecode transform time: 5.56 s +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:37 [backends.py:430] Dynamo bytecode transform time: 5.67 s +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:37 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:37 [backends.py:430] Dynamo bytecode transform time: 5.74 s +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.374 s +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.384 s +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.344 s +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:42 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.438 s +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:52:48 [monitor.py:33] torch.compile takes 5.74 s in total +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:52:48 [monitor.py:33] torch.compile takes 5.67 s in total +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:52:48 [monitor.py:33] torch.compile takes 5.54 s in total +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:52:48 [monitor.py:33] torch.compile takes 5.56 s in total +INFO 06-29 02:52:49 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 02:52:49 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 02:52:49 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:52:49 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:52:49 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 02:52:49 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 02:52:49 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 02:52:49 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=2 pid=3665332) INFO 06-29 02:53:15 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3665333) INFO 06-29 02:53:15 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3665330) INFO 06-29 02:53:15 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3665331) INFO 06-29 02:53:15 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-29 02:53:15 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.52 seconds +INFO 06-29 02:53:15 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 03:05:53 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 03:05:53 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5150|± |0.0277| +| | |math_pass@1:1_samples|0.7385|± |0.0452| +|mm\|arc_challenge\|0| 0|sem |0.6220|± |0.0249| +|mm\|arc_easy\|0 | 0|sem |0.6241|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5000|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7271|± |0.0211| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7500|± |0.0693| +|mm\|truthfulqa\|0 | 0|sem |0.3140|± |0.0424| + diff --git a/merge_bench1/logs/phi_ties_1.log b/merge_bench1/logs/phi_ties_1.log new file mode 100644 index 0000000000000000000000000000000000000000..2ca76046bba23c8e6a4a96b386e71f348f4042ac --- /dev/null +++ b/merge_bench1/logs/phi_ties_1.log @@ -0,0 +1,96 @@ +INFO 06-29 03:05:52 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 03:05:53 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 03:06:00 [config.py:717] This model supports multiple tasks: {'generate', 'embed', 'score', 'reward', 'classify'}. Defaulting to 'generate'. +INFO 06-29 03:06:00 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 03:06:00 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 03:06:02 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 03:06:02 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 03:06:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_ba3201d8'), local_subscribe_addr='ipc:///tmp/79f94962-df31-4714-a5ba-66e8dabc65f0', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:06:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 03:06:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a98cd5c8'), local_subscribe_addr='ipc:///tmp/1665beca-a759-4a74-8a35-328b73af7aab', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:06:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_997a3d0d'), local_subscribe_addr='ipc:///tmp/5fa1bf18-a627-4f65-a698-951a2e2be934', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:06:02 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_64e56576'), local_subscribe_addr='ipc:///tmp/add31029-1e82-4cb0-91a5-1096740d59b3', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:02 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0123b614'), local_subscribe_addr='ipc:///tmp/ec760e8b-edf8-4c14-85ba-8192fe5f2fc9', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:04 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:04 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3667354) WARNING 06-29 03:06:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=3 pid=3667355) WARNING 06-29 03:06:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3667353) WARNING 06-29 03:06:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3667352) WARNING 06-29 03:06:05 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:05 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_e9e897c3'), local_subscribe_addr='ipc:///tmp/e92f1b57-1c65-4d47-b989-9bc2763f34ca', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:05 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:05 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:05 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:05 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:05 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:05 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3667355) WARNING 06-29 03:06:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3667354) WARNING 06-29 03:06:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:05 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:05 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3667352) WARNING 06-29 03:06:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3667353) WARNING 06-29 03:06:05 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:05 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:06 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:06 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:06 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:06 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.869035 seconds +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.869307 seconds +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.963994 seconds +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:06 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.924952 seconds +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:12 [backends.py:430] Dynamo bytecode transform time: 5.86 s +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:12 [backends.py:430] Dynamo bytecode transform time: 5.97 s +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:12 [backends.py:430] Dynamo bytecode transform time: 6.03 s +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:12 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:12 [backends.py:430] Dynamo bytecode transform time: 6.06 s +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.370 s +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.428 s +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.399 s +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:17 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.410 s +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:23 [monitor.py:33] torch.compile takes 5.86 s in total +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:23 [monitor.py:33] torch.compile takes 6.03 s in total +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:23 [monitor.py:33] torch.compile takes 5.97 s in total +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:23 [monitor.py:33] torch.compile takes 6.06 s in total +INFO 06-29 03:06:24 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 03:06:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 03:06:24 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 03:06:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 03:06:24 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 03:06:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 03:06:24 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 03:06:24 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=3 pid=3667355) INFO 06-29 03:06:50 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3667354) INFO 06-29 03:06:50 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3667353) INFO 06-29 03:06:50 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +(VllmWorker rank=0 pid=3667352) INFO 06-29 03:06:50 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB +INFO 06-29 03:06:50 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.64 seconds +INFO 06-29 03:06:50 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 03:19:33 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 03:19:33 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5172|± |0.0278| +| | |math_pass@1:1_samples|0.7669|± |0.0425| +|mm\|arc_challenge\|0| 0|sem |0.6247|± |0.0248| +|mm\|arc_easy\|0 | 0|sem |0.6146|± |0.0158| +|mm\|commonsenseqa\|0| 0|sem |0.5156|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7338|± |0.0209| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8000|± |0.0641| +|mm\|truthfulqa\|0 | 0|sem |0.3140|± |0.0424| + diff --git a/merge_bench1/logs/phi_ties_3.log b/merge_bench1/logs/phi_ties_3.log new file mode 100644 index 0000000000000000000000000000000000000000..4a7affafee1d40fb29da69a4623987e2cb1e42d7 --- /dev/null +++ b/merge_bench1/logs/phi_ties_3.log @@ -0,0 +1,96 @@ +INFO 06-29 03:19:32 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 03:19:34 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 03:19:42 [config.py:717] This model supports multiple tasks: {'embed', 'generate', 'score', 'reward', 'classify'}. Defaulting to 'generate'. +INFO 06-29 03:19:42 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 03:19:42 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 03:19:43 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 03:19:43 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 03:19:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_f60685f9'), local_subscribe_addr='ipc:///tmp/5b34a253-5630-4983-9d75-c91db1e7bc66', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:19:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_6688640e'), local_subscribe_addr='ipc:///tmp/97ed54ce-ac52-4eb9-82d8-07a854f5b5ca', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:19:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_0d7429ca'), local_subscribe_addr='ipc:///tmp/c65953f5-c4db-4e85-956d-694608af0d47', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:19:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 03:19:43 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_13789726'), local_subscribe_addr='ipc:///tmp/e5fd8a8d-2f89-4560-bfb6-d75b0b0a32e9', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_84a8432e'), local_subscribe_addr='ipc:///tmp/dabdb9a0-7446-4a4e-a490-9c7278be1059', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:45 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:45 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:45 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:45 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:45 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:45 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:45 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:45 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3669312) WARNING 06-29 03:19:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3669311) WARNING 06-29 03:19:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=3 pid=3669314) WARNING 06-29 03:19:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3669313) WARNING 06-29 03:19:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_72b17e88'), local_subscribe_addr='ipc:///tmp/bbb7421d-6947-4c9d-b696-b6d2efe0950e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:46 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3 +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:46 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2 +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:46 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:46 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:46 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:46 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3669312) WARNING 06-29 03:19:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3669311) WARNING 06-29 03:19:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:46 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:46 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=3 pid=3669314) WARNING 06-29 03:19:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=2 pid=3669313) WARNING 06-29 03:19:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:47 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:47 [loader.py:458] Loading weights took 0.68 seconds +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:47 [loader.py:458] Loading weights took 0.70 seconds +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:47 [loader.py:458] Loading weights took 0.75 seconds +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.864661 seconds +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.862463 seconds +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.956508 seconds +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.907952 seconds +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:53 [backends.py:430] Dynamo bytecode transform time: 5.56 s +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:53 [backends.py:430] Dynamo bytecode transform time: 5.73 s +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:53 [backends.py:430] Dynamo bytecode transform time: 5.80 s +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:53 [backends.py:430] Dynamo bytecode transform time: 5.81 s +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:19:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.380 s +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:19:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.399 s +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:19:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.442 s +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:19:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.465 s +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:20:04 [monitor.py:33] torch.compile takes 5.80 s in total +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:20:04 [monitor.py:33] torch.compile takes 5.73 s in total +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:20:04 [monitor.py:33] torch.compile takes 5.56 s in total +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:20:04 [monitor.py:33] torch.compile takes 5.81 s in total +INFO 06-29 03:20:05 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens +INFO 06-29 03:20:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x +INFO 06-29 03:20:05 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens +INFO 06-29 03:20:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 03:20:05 [kv_cache_utils.py:634] 
GPU KV cache size: 2,006,832 tokens +INFO 06-29 03:20:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x +INFO 06-29 03:20:05 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens +INFO 06-29 03:20:05 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x +(VllmWorker rank=0 pid=3669311) INFO 06-29 03:20:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=1 pid=3669312) INFO 06-29 03:20:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=3 pid=3669314) INFO 06-29 03:20:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +(VllmWorker rank=2 pid=3669313) INFO 06-29 03:20:31 [gpu_model_runner.py:1686] Graph capturing finished in 26 secs, took 2.96 GiB +INFO 06-29 03:20:31 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.87 seconds +INFO 06-29 03:20:31 [core_client.py:439] Core engine process 0 ready. +INFO 06-29 03:33:05 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-29 03:33:05 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5086|± |0.0279| +| | |math_pass@1:1_samples|0.7656|± |0.0436| +|mm\|arc_challenge\|0| 0|sem |0.5774|± |0.0253| +|mm\|arc_easy\|0 | 0|sem |0.6241|± |0.0157| +|mm\|commonsenseqa\|0| 0|sem |0.5188|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7562|± |0.0203| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7750|± |0.0669| +|mm\|truthfulqa\|0 | 0|sem |0.3140|± |0.0424| + diff --git a/merge_bench1/logs/phi_ties_5.log b/merge_bench1/logs/phi_ties_5.log new file mode 100644 index 0000000000000000000000000000000000000000..6da5fdd9355269adebdb9d59e5a1b63040bfdb7d --- /dev/null +++ b/merge_bench1/logs/phi_ties_5.log @@ -0,0 +1,96 @@ +INFO 06-29 03:33:04 [__init__.py:239] Automatically detected platform cuda. +INFO 06-29 03:33:06 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-29 03:33:13 [config.py:717] This model supports multiple tasks: {'generate', 'classify', 'embed', 'score', 'reward'}. Defaulting to 'generate'. +INFO 06-29 03:33:13 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-29 03:33:13 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-29 03:33:15 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-29 03:33:15 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-29 03:33:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_e1b020b7'), local_subscribe_addr='ipc:///tmp/ef4ccf98-5e19-44da-92e8-cd0d3c1510b7', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:33:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b7abb855'), local_subscribe_addr='ipc:///tmp/4c57da17-7639-4ca6-b8d2-9f74965c6ea1', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:33:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +WARNING 06-29 03:33:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_52ce13c8'), local_subscribe_addr='ipc:///tmp/e596dc71-2f66-46a6-b66a-1786f20cb1be', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-29 03:33:15 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3f17dc47'), local_subscribe_addr='ipc:///tmp/8932f218-9280-4e7c-b730-8364c77895da', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a34eacdf'), local_subscribe_addr='ipc:///tmp/fdf0bcaa-af0b-4edf-a529-9ea96c23a1a0', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:26 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:26 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=3 pid=3671289) WARNING 06-29 03:33:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. +(VllmWorker rank=2 pid=3671288) WARNING 06-29 03:33:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly. 
+(VllmWorker rank=1 pid=3671287) WARNING 06-29 03:33:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3671286) WARNING 06-29 03:33:27 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:27 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_f4609f6d'), local_subscribe_addr='ipc:///tmp/5e8bc0d2-7e01-487d-9d81-48a37ff40918', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:27 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:27 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:27 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:27 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3671288) WARNING 06-29 03:33:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3671289) WARNING 06-29 03:33:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:27 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3671287) WARNING 06-29 03:33:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3671286) WARNING 06-29 03:33:27 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:27 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:28 [loader.py:458] Loading weights took 0.70 seconds
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:28 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:28 [loader.py:458] Loading weights took 0.71 seconds
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:28 [loader.py:458] Loading weights took 0.74 seconds
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:28 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.893043 seconds
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:28 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.936478 seconds
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:28 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.891848 seconds
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:28 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.906105 seconds
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:34 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:34 [backends.py:430] Dynamo bytecode transform time: 5.98 s
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:34 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:34 [backends.py:430] Dynamo bytecode transform time: 6.16 s
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:34 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:34 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:34 [backends.py:430] Dynamo bytecode transform time: 6.17 s
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:34 [backends.py:430] Dynamo bytecode transform time: 6.17 s
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:39 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.495 s
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:40 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.453 s
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:40 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.473 s
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:40 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.515 s
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:33:46 [monitor.py:33] torch.compile takes 6.17 s in total
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:33:46 [monitor.py:33] torch.compile takes 6.17 s in total
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:33:46 [monitor.py:33] torch.compile takes 6.16 s in total
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:33:46 [monitor.py:33] torch.compile takes 5.98 s in total
+INFO 06-29 03:33:47 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-29 03:33:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-29 03:33:47 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 03:33:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 03:33:47 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 03:33:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 03:33:47 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-29 03:33:47 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3671288) INFO 06-29 03:34:15 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3671289) INFO 06-29 03:34:15 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3671286) INFO 06-29 03:34:15 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3671287) INFO 06-29 03:34:15 [gpu_model_runner.py:1686] Graph capturing finished in 28 secs, took 2.96 GiB
+INFO 06-29 03:34:15 [core.py:159] init engine (profile, create kv cache, warmup model) took 46.78 seconds
+INFO 06-29 03:34:15 [core_client.py:439] Core engine process 0 ready.
+INFO 06-29 03:46:54 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-29 03:46:54 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5157|± |0.0282|
+| | |math_pass@1:1_samples|0.7214|± |0.0470|
+|mm\|arc_challenge\|0| 0|sem |0.5958|± |0.0252|
+|mm\|arc_easy\|0 | 0|sem |0.6146|± |0.0158|
+|mm\|commonsenseqa\|0| 0|sem |0.4969|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7427|± |0.0207|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.7000|± |0.0734|
+|mm\|truthfulqa\|0 | 0|sem |0.3554|± |0.0437|
+
diff --git a/merge_bench1/logs/phi_ties_7.log b/merge_bench1/logs/phi_ties_7.log
new file mode 100644
index 0000000000000000000000000000000000000000..80c72cdd92b0e2c3c839a73b2523191f00af2189
--- /dev/null
+++ b/merge_bench1/logs/phi_ties_7.log
@@ -0,0 +1,96 @@
+INFO 06-29 03:46:53 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-29 03:46:55 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-29 03:47:02 [config.py:717] This model supports multiple tasks: {'classify', 'score', 'generate', 'reward', 'embed'}. Defaulting to 'generate'.
+INFO 06-29 03:47:02 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-29 03:47:02 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-29 03:47:03 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-29 03:47:03 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-29 03:47:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_798bad1b'), local_subscribe_addr='ipc:///tmp/1004865e-fd6b-4806-8a6c-d5d600d07d89', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 03:47:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a040599b'), local_subscribe_addr='ipc:///tmp/bedf2e20-54fd-4ed0-8ab5-5211f61542a8', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 03:47:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_82b01692'), local_subscribe_addr='ipc:///tmp/a953b2b2-3c2c-44fd-9d56-973cd5acbf7f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 03:47:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-29 03:47:03 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:03 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_31beec63'), local_subscribe_addr='ipc:///tmp/a5405392-6f73-4c53-b3c5-ed7a8ef03f87', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:04 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_38e5878c'), local_subscribe_addr='ipc:///tmp/a5b114f1-9b83-472c-8075-f33684f2b4b3', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:05 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:05 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:05 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:05 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:05 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:05 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:05 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:05 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3673251) WARNING 06-29 03:47:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3673250) WARNING 06-29 03:47:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3673248) WARNING 06-29 03:47:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3673249) WARNING 06-29 03:47:06 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:06 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_2bcf11f0'), local_subscribe_addr='ipc:///tmp/04b20605-7549-4d52-9e3d-28897329630f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:06 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:06 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:06 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3673250) WARNING 06-29 03:47:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3673251) WARNING 06-29 03:47:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3673249) WARNING 06-29 03:47:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:06 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:06 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3673248) WARNING 06-29 03:47:06 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:06 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:07 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:07 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:07 [loader.py:458] Loading weights took 0.72 seconds
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:07 [loader.py:458] Loading weights took 0.75 seconds
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.864054 seconds
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.864618 seconds
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.930668 seconds
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:07 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.985715 seconds
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:13 [backends.py:430] Dynamo bytecode transform time: 5.54 s
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:13 [backends.py:430] Dynamo bytecode transform time: 5.63 s
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:13 [backends.py:430] Dynamo bytecode transform time: 5.65 s
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:13 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:13 [backends.py:430] Dynamo bytecode transform time: 5.69 s
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.367 s
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.345 s
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.360 s
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:18 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.343 s
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:24 [monitor.py:33] torch.compile takes 5.69 s in total
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:24 [monitor.py:33] torch.compile takes 5.65 s in total
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:24 [monitor.py:33] torch.compile takes 5.63 s in total
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:24 [monitor.py:33] torch.compile takes 5.54 s in total
+INFO 06-29 03:47:25 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-29 03:47:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-29 03:47:25 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 03:47:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 03:47:25 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 03:47:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 03:47:25 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-29 03:47:25 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=2 pid=3673250) INFO 06-29 03:47:51 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3673251) INFO 06-29 03:47:51 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3673249) INFO 06-29 03:47:51 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+(VllmWorker rank=0 pid=3673248) INFO 06-29 03:47:51 [gpu_model_runner.py:1686] Graph capturing finished in 27 secs, took 2.96 GiB
+INFO 06-29 03:47:51 [core.py:159] init engine (profile, create kv cache, warmup model) took 44.20 seconds
+INFO 06-29 03:47:52 [core_client.py:439] Core engine process 0 ready.
+INFO 06-29 04:00:34 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-29 04:00:34 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5189|± |0.0278|
+| | |math_pass@1:1_samples|0.7885|± |0.0391|
+|mm\|arc_challenge\|0| 0|sem |0.6273|± |0.0248|
+|mm\|arc_easy\|0 | 0|sem |0.6199|± |0.0158|
+|mm\|commonsenseqa\|0| 0|sem |0.5062|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7271|± |0.0211|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8500|± |0.0572|
+|mm\|truthfulqa\|0 | 0|sem |0.3223|± |0.0427|
+
diff --git a/merge_bench1/logs/phi_ties_9.log b/merge_bench1/logs/phi_ties_9.log
new file mode 100644
index 0000000000000000000000000000000000000000..73ddff943b59367ecfd1a6843b3cf25b2ab192ae
--- /dev/null
+++ b/merge_bench1/logs/phi_ties_9.log
@@ -0,0 +1,96 @@
+INFO 06-29 04:00:33 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-29 04:00:35 [config.py:209] Replacing legacy 'type' key with 'rope_type'
+INFO 06-29 04:00:42 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'reward', 'classify', 'embed'}. Defaulting to 'generate'.
+INFO 06-29 04:00:42 [config.py:1770] Defaulting to use mp for distributed inference
+INFO 06-29 04:00:42 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384.
+INFO 06-29 04:00:43 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=4, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512}
+WARNING 06-29 04:00:43 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.
+INFO 06-29 04:00:43 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1, 2, 3], buffer_handle=(4, 10485760, 10, 'psm_af60a0ce'), local_subscribe_addr='ipc:///tmp/4030fce9-397d-442b-8e73-36ce1b4dce87', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 04:00:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_67a4ecac'), local_subscribe_addr='ipc:///tmp/da7d9051-d8f0-4ab8-9100-81bea5976e5a', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 04:00:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_3cebc9c5'), local_subscribe_addr='ipc:///tmp/e3963ce2-49be-48d6-96ec-40563fa4d796', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-29 04:00:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+WARNING 06-29 04:00:44 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4b1b24fc'), local_subscribe_addr='ipc:///tmp/af6fc51c-8443-4384-b66e-b8dc28a2e02f', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:44 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_f1c52658'), local_subscribe_addr='ipc:///tmp/6cee265f-5844-474e-a30f-6b6582c75b20', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:45 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:45 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=3 pid=3675218) WARNING 06-29 04:00:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=2 pid=3675217) WARNING 06-29 04:00:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3675215) WARNING 06-29 04:00:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=1 pid=3675216) WARNING 06-29 04:00:46 [custom_all_reduce.py:136] Custom allreduce is disabled because it's not supported on more than two PCIe-only GPUs. To silence this warning, specify disable_custom_all_reduce=True explicitly.
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:46 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1, 2, 3], buffer_handle=(3, 4194304, 6, 'psm_3bba0467'), local_subscribe_addr='ipc:///tmp/dd1fe8e6-9de8-475b-bfa5-7d74cf4079d5', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:46 [parallel_state.py:1004] rank 3 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 3
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:46 [parallel_state.py:1004] rank 2 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 2
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:46 [parallel_state.py:1004] rank 1 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=3 pid=3675218) WARNING 06-29 04:00:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=2 pid=3675217) WARNING 06-29 04:00:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3675216) WARNING 06-29 04:00:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:46 [parallel_state.py:1004] rank 0 in world size 4 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:46 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3675215) WARNING 06-29 04:00:46 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:46 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:47 [loader.py:458] Loading weights took 0.69 seconds
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:47 [loader.py:458] Loading weights took 0.71 seconds
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:47 [loader.py:458] Loading weights took 0.68 seconds
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:47 [loader.py:458] Loading weights took 0.74 seconds
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.881764 seconds
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.896150 seconds
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.897381 seconds
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:47 [gpu_model_runner.py:1347] Model loading took 1.8196 GiB and 0.959108 seconds
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_3_0 for vLLM's torch.compile
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:53 [backends.py:430] Dynamo bytecode transform time: 5.60 s
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:53 [backends.py:430] Dynamo bytecode transform time: 5.67 s
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:53 [backends.py:430] Dynamo bytecode transform time: 5.74 s
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:53 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/bc6735f00d/rank_2_0 for vLLM's torch.compile
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:53 [backends.py:430] Dynamo bytecode transform time: 5.76 s
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:00:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.668 s
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:00:58 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.795 s
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:00:59 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.655 s
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:00:59 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.723 s
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:01:04 [monitor.py:33] torch.compile takes 5.60 s in total
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:01:04 [monitor.py:33] torch.compile takes 5.76 s in total
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:01:04 [monitor.py:33] torch.compile takes 5.67 s in total
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:01:04 [monitor.py:33] torch.compile takes 5.74 s in total
+INFO 06-29 04:01:06 [kv_cache_utils.py:634] GPU KV cache size: 2,007,088 tokens
+INFO 06-29 04:01:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.02x
+INFO 06-29 04:01:06 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 04:01:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 04:01:06 [kv_cache_utils.py:634] GPU KV cache size: 2,006,832 tokens
+INFO 06-29 04:01:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 979.90x
+INFO 06-29 04:01:06 [kv_cache_utils.py:634] GPU KV cache size: 2,008,112 tokens
+INFO 06-29 04:01:06 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 980.52x
+(VllmWorker rank=0 pid=3675215) INFO 06-29 04:01:31 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+(VllmWorker rank=1 pid=3675216) INFO 06-29 04:01:31 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+(VllmWorker rank=2 pid=3675217) INFO 06-29 04:01:31 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+(VllmWorker rank=3 pid=3675218) INFO 06-29 04:01:31 [gpu_model_runner.py:1686] Graph capturing finished in 25 secs, took 2.96 GiB
+INFO 06-29 04:01:31 [core.py:159] init engine (profile, create kv cache, warmup model) took 43.58 seconds
+INFO 06-29 04:01:31 [core_client.py:439] Core engine process 0 ready.
+INFO 06-29 04:14:09 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-29 04:14:09 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.5183|± |0.0278|
+| | |math_pass@1:1_samples|0.7850|± |0.0407|
+|mm\|arc_challenge\|0| 0|sem |0.6299|± |0.0248|
+|mm\|arc_easy\|0 | 0|sem |0.6304|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |0.4906|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7450|± |0.0206|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608|
+|mm\|truthfulqa\|0 | 0|sem |0.3223|± |0.0427|
+
diff --git a/merge_bench1/logs/results.log b/merge_bench1/logs/results.log
new file mode 100644
index 0000000000000000000000000000000000000000..6cd5f28e87c5d2bbb08f35c0cb9ecc7df61e6f0c
--- /dev/null
+++ b/merge_bench1/logs/results.log
@@ -0,0 +1,93 @@
+| Task |Version| Metric | Model |Value | |Stderr|
+|------------------|------:|---------------------|--------------------------|-----:|---|-----:|
+|mm\|arc_challenge\|0| 0|sem |._merged2_phi_darelinear_1|0.5853|± |0.0253|
+| | | |._merged2_phi_darelinear_3|0.5853|± |0.0253|
+| | | |._merged2_phi_darelinear_5|0.6089|± |0.0250|
+| | | |._merged2_phi_darelinear_7|0.6273|± |0.0248|
+| | | |._merged2_phi_darelinear_9|0.6273|± |0.0248|
+| | | |._merged2_phi_linear_1 |0.5696|± |0.0254|
+| | | |._merged2_phi_linear_3 |0.6089|± |0.0250|
+| | | |._merged2_phi_linear_5 |0.6273|± |0.0248|
+| | | |._merged2_phi_linear_7 |0.6299|± |0.0248|
+| | | |._merged2_phi_linear_9 |0.6220|± |0.0249|
+| | | |._merged2_phi_ties_1 |0.6247|± |0.0248|
+| | | |._merged2_phi_ties_3 |0.5774|± |0.0253|
+| | | |._merged2_phi_ties_5 |0.5958|± |0.0252|
+| | | |._merged2_phi_ties_7 |0.6273|± |0.0248|
+| | | |._merged2_phi_ties_9 |0.6299|± |0.0248|
+|mm\|arc_easy\|0 | 0|sem |._merged2_phi_darelinear_1|0.6410|± |0.0156|
+| | | |._merged2_phi_darelinear_3|0.6399|± |0.0156|
+| | | |._merged2_phi_darelinear_5|0.6167|± |0.0158|
+| | | |._merged2_phi_darelinear_7|0.6230|± |0.0158|
+| | | |._merged2_phi_darelinear_9|0.6241|± |0.0157|
+| | | |._merged2_phi_linear_1 |0.6463|± |0.0155|
+| | | |._merged2_phi_linear_3 |0.6262|± |0.0157|
+| | | |._merged2_phi_linear_5 |0.6294|± |0.0157|
+| | | |._merged2_phi_linear_7 |0.6283|± |0.0157|
+| | | |._merged2_phi_linear_9 |0.6241|± |0.0157|
+| | | |._merged2_phi_ties_1 |0.6146|± |0.0158|
+| | | |._merged2_phi_ties_3 |0.6241|± |0.0157|
+| | | |._merged2_phi_ties_5 |0.6146|± |0.0158|
+| | | |._merged2_phi_ties_7 |0.6199|± |0.0158|
+| | | |._merged2_phi_ties_9 |0.6304|± |0.0157|
+|mm\|commonsenseqa\|0| 0|sem |._merged2_phi_darelinear_1|0.5281|± |0.0280|
+| | | |._merged2_phi_darelinear_3|0.4938|± |0.0280|
+| | | |._merged2_phi_darelinear_5|0.5344|± |0.0279|
+| | | |._merged2_phi_darelinear_7|0.5312|± |0.0279|
+| | | |._merged2_phi_darelinear_9|0.5188|± |0.0280|
+| | | |._merged2_phi_linear_1 |0.4781|± |0.0280|
+| | | |._merged2_phi_linear_3 |0.5281|± |0.0280|
+| | | |._merged2_phi_linear_5 |0.5188|± |0.0280|
+| | | |._merged2_phi_linear_7 |0.5031|± |0.0280|
+| | | |._merged2_phi_linear_9 |0.5000|± |0.0280|
+| | | |._merged2_phi_ties_1 |0.5156|± |0.0280|
+| | | |._merged2_phi_ties_3 |0.5188|± |0.0280|
+| | | |._merged2_phi_ties_5 |0.4969|± |0.0280|
+| | | |._merged2_phi_ties_7 |0.5062|± |0.0280|
+| | | |._merged2_phi_ties_9 |0.4906|± |0.0280|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|._merged2_phi_darelinear_1|0.7204|± |0.0213|
+| | | |._merged2_phi_darelinear_3|0.7494|± |0.0205|
+| | | |._merged2_phi_darelinear_5|0.7315|± |0.0210|
+| | | |._merged2_phi_darelinear_7|0.7405|± |0.0208|
+| | | |._merged2_phi_darelinear_9|0.7405|± |0.0208|
+| | | |._merged2_phi_linear_1 |0.7472|± |0.0206|
+| | | |._merged2_phi_linear_3 |0.7315|± |0.0210|
+| | | |._merged2_phi_linear_5 |0.7092|± |0.0215|
+| | | |._merged2_phi_linear_7 |0.7002|± |0.0217|
+| | | |._merged2_phi_linear_9 |0.7271|± |0.0211|
+| | | |._merged2_phi_ties_1 |0.7338|± |0.0209|
+| | | |._merged2_phi_ties_3 |0.7562|± |0.0203|
+| | | |._merged2_phi_ties_5 |0.7427|± |0.0207|
+| | | |._merged2_phi_ties_7 |0.7271|± |0.0211|
+| | | |._merged2_phi_ties_9 |0.7450|± |0.0206|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|._merged2_phi_darelinear_1|0.7500|± |0.0693|
+| | | |._merged2_phi_darelinear_3|0.8250|± |0.0608|
+| | | |._merged2_phi_darelinear_5|0.8000|± |0.0641|
+| | | |._merged2_phi_darelinear_7|0.7500|± |0.0693|
+| | | |._merged2_phi_darelinear_9|0.8500|± |0.0572|
+| | | |._merged2_phi_linear_1 |0.8500|± |0.0572|
+| | | |._merged2_phi_linear_3 |0.8500|± |0.0572|
+| | | |._merged2_phi_linear_5 |0.8500|± |0.0572|
+| | | |._merged2_phi_linear_7 |0.8000|± |0.0641|
+| | | |._merged2_phi_linear_9 |0.7500|± |0.0693|
+| | | |._merged2_phi_ties_1 |0.8000|± |0.0641|
+| | | |._merged2_phi_ties_3 |0.7750|± |0.0669|
+| | | |._merged2_phi_ties_5 |0.7000|± |0.0734|
+| | | |._merged2_phi_ties_7 |0.8500|± |0.0572|
+| | | |._merged2_phi_ties_9 |0.8250|± |0.0608|
+|mm\|truthfulqa\|0 | 0|sem |._merged2_phi_darelinear_1|0.3471|± |0.0435|
+| | | |._merged2_phi_darelinear_3|0.3802|± |0.0443|
+| | | |._merged2_phi_darelinear_5|0.3223|± |0.0427|
+| | | |._merged2_phi_darelinear_7|0.2893|± |0.0414|
+| | | |._merged2_phi_darelinear_9|0.3554|± |0.0437|
+| | | |._merged2_phi_linear_1 |0.3388|± |0.0432|
+| | | |._merged2_phi_linear_3 |0.3388|± |0.0432|
+| | | |._merged2_phi_linear_5 |0.3471|± |0.0435|
+| | | |._merged2_phi_linear_7 |0.3554|± |0.0437|
+| | | |._merged2_phi_linear_9 |0.3140|± |0.0424|
+| | | |._merged2_phi_ties_1 |0.3140|± |0.0424|
+| | | |._merged2_phi_ties_3 |0.3140|± |0.0424|
+| | | |._merged2_phi_ties_5 |0.3554|± |0.0437|
+| | | |._merged2_phi_ties_7 |0.3223|± |0.0427|
+| | | |._merged2_phi_ties_9 |0.3223|± |0.0427|
+
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ea167ecedfc5f4c21a526f865fc354c64d78497f
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9264616877c597d31b8b0294b97dd9d747563f11f50289cd986790cfba1c8f66
+size 3512645
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d676a2adb2f0d98fb4017e8cd0b6072e81711835
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7682d8fa722ece50be3a5d1524c02be894d8edee81d276ed1f7ee4768d2e576
+size 8156916
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b8e2fb025808899d5be23f7ddcdce63c375c3d18
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc6fe7c6d673953339d7dcece3c9fdf448a0003f83d5affb979aca8b2ad16ea9
+size 2864819
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2509f9910d7034b532102ddae758121081675c1a
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:087538ab47a02b1a94acd3bc380773f36af2f4bbd5dad3b68c94ed69265b8666
+size 3042162
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..4957059c7ecda7bf09507c50d3a5a99c32dc0bcb
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b200f2bbfd2c298af54c484541a0d198ddb44b4415ef84b2a96ebe5951986997
+size 317268
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f9532a558b1f1367cf2e13df0928153d5b2d1624
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f095e09521d905e0b1c192da07c3210239cef44f56672d8ddfa210e59cba0cc7
+size 1152232
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..66c5fd1e9a5c5b7423ee643fb09f0b6466bb3c3d
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08f9641deb4e771c97dca4a27310bf62dc5a385cc3224b11d2015cf7feab016a
+size 3531838
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..fdbd8c9132cef9176eb00840f795385f7189c5ae
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31c07bb88bf83ba1476275ad9d3e40e96166784cfb24edf5cf9538514bbe8bed
+size 8164421
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..2247667e095b3bfba45c1197fe86099bacf5ae5d
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5d2e5cd76770bcfcb54ab73bb16ef202d6d327ea9689d84129982c423616155
+size 2853855
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..695e4e6d9a2139d97476b7bb7ad88ea6565ec8aa
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d1a1448476cda2b62eacfe614d352f5cd74ee0080a29ae467d4267eb4ba40f1
+size 3039114
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b29519132bdf891ced29054288cd600f33b276c7
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23ba230fc553f11ef6634fd41351dbf995b23e72372e0ef53c57c8df5fb8e883
+size 316948
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..497ea45e2000809cf992a394ba524a03ed186953
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af5e9195eca73dfc36946fb4da16e77c9372d53e64bd2ff9009f8d47815eaa2a
+size 1159845
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..5adf92be1dec0d89695e73e004b783cd0fe55739
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18e058617b3a97dda81db9e7c1a5e300f6ae9c323ec2b10a69febbbfabadefeb
+size 3523883
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7a57c9a64ebc766220e1d2f90039d82386f20d78
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48d79c7cbb08e2629753833f02251be9038ff54ed6c1fe2006f74d5a259d419a
+size 8174508
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..25d14e4b6a0d85128067e2519944c0b7f4d4834c
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbfc8247daebed86823508dea15793be47061486ba8ae8c85854ee5977391a36
+size 2867055
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7bd571029d522a7e0ccc9ceae86247c4b122f95c
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fbecead02797fe96653028095edc0c70661fecfbba5501f9f12a7f84b0b7ee5
+size 3040248
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..90aa9224bc18d3d3482b59bbdb2874a6b1e4f07c
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f487c3a27a2aa5027542d65467dfe432b7e5bda9675427aa7ca3528278024820
+size 316808
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..42a70d6b3d3b372081b300d037964adc5f41069b
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:267b49c4c529aa31ce844e4a4d39a0cd84cc7af91599f19dce41153516fd7770
+size 1152142
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a1dc8faa3a71bbbe58f33fbf0c189ae5eb863600
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8d616c4f33b577cf8ffb9ab14e7372acdb9e82903a318bacf19be79b11c6430
+size 3524588
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..f6a5a3fea02c053fe0b04fe43b897bb0866ee42a
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7d7ecdcf1c89cc6a486d614680225d93e4f0428fce2bef3c047b01858560287
+size 8160964
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..927cd22888ffdb1a0dae27a37a821eb9fc0f8a3f
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:982d1165fec2ebdad468678a79de52a9c21a84d86d2ba7d7e019e080ec34c542
+size 2867515
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..02bee6e3cb5fd483fb2606573dc72a4f949a2545
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e67d9b515d850432079e0a27eb25987cd2a4d6bc679e744079987e3bf503fee
+size 3037738
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6746db6ca81b5ac42178044a474a8e2ab70ae2fc
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28f6cd5654b393480d9c909e6f64230e837408473d02a4e882061482578a16cc
+size 316487
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..6c43d57770a747b9b097863d1855720c0246a07b
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c9a953e6fe47d1be9802aa264fb4f1f9e233be8b130e25acd26f539ee512da1
+size 1147526
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ef699d8c60ce5adaa48b51f6420196a1c336fc34
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ca45d9ec1daedd479c5ce0a5bd2b9a8f247fddce5b632420b723a5649fc3e4e
+size 3527046
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..052ef25dffa9dcf344c463edfee1166ac8cb7879
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce9206920bb4f06a4a2c0e26c58d820b7cbe77d1d3da8082f05e7afb8c6a1f1e
+size 8159910
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..db6a9c5e0501e5c5c0d51ee765dce6e4d593ed4b
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a9d3257866cb9a6c79c0f05806445f30ad9f08b885a237adaa4098b65abddf5
+size 2865791
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..eef51f7b268e657b2eaae12c24055cecb7d52248
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c7f0cd050d1fdf832dfb7c48131cafa2f0916e4370c99fa4197515df7e31679
+size 3038150
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d30feefd993dbe961d90849e27035c3459bc7654
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed5223fb9acfd3573fca02d801dea557e968a6dd3e511e744c7891c55bc07a92
+size 317586
diff --git a/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..7af5ee7ce9db3099177279929ccf00fe28b83762
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_darelinear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa6c1e60ebc40c04f82335681c45449f4677fd7ade2679c2f82e4025e1430f7d
+size 1151177
diff --git a/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3b31abe8ca3545b69acdb7c0e7e9e7421ced4858
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74547f69e25ef99dca7ddb8d31ca115f4b97a3ba431d8dcde43592693299ffa8
+size 3521168
diff --git a/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..4f9981f2f4a7fc4a41c05ef38c354f95bc524ff0
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e06011ad4911d7df8a2d3a0a567539086d516633907fb6a67b15270d0c01396
+size 8171023
diff --git a/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..3cb1aee2261bcfbd88d2cb2aaeae62e2b918f8cc
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e79c06c15a9b971ee3bd25e33f1a7f7f754e89478fb690d2f4389f03976b2c50
+size 2858925
diff --git a/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..dc52181526b5a043d41b9907a4f3f049ce0ac4d0
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d745d9a5759b7601261151459ea2ecf21a051f1c7dbf866031d984f159a8b82
+size 3040892
diff --git a/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..a9958f265af01ac1e763702dfa23b865d25fc217
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13dcd5509229e73862271074d83348554bc01de9705a2ac6d76d212f8ff56893
+size 317235
diff --git a/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..24a6bb5df7f0fc4e30436e11b4673e17a6a6ecc0
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba3d186df72766d476440af00cdb181985aee7842e4ef4ed19e7b904c5109d75
+size 1156591
diff --git a/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..0aa627b7f8d3474bdc34472a5817509f16c564f0
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24f1fae88f520d93fba1f8c51d3d70df3aa4764bcc1ddb0eeafa3b1de4fb1f58
+size 3526621
diff --git a/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..625082e1f15de5669b2a228dbace4a803305b073
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3a569d0d610fe9ccec6788442b356a0e167d1aa88249ea52a6891c6b79fce05
+size 8153473
diff --git a/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..16f57a92f89f4a36243e36ce7db01e8509c15ba8
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ec37ef957014ba4c5db575d3147166321e37ccae2fcc5c584b903356bb41ddf
+size 2863345
diff --git a/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ea5e1326737943e7dd6439bd47137b6a27811b21
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3666c6cfe7232dbc1b20094c55adcc933f684eb8604e4cd4f4e854da267c3a84
+size 3042919
diff --git a/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..d82f3ce91ea2d84a9c0350cb98436f5908bb323b
--- /dev/null
+++ b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5248da9b46e79c23a18e2f9a01557548247542363a2cbf34a490904deac275ee +size 316413 diff --git a/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9bdf29f6fd05ccb36593b09b4911d429aab5ef55 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a414c5da9415b1a37a8493ec561045cfbdddb0131af4ee1152de8af745e9a40 +size 1153035 diff --git a/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8ee4fa2c8ef8a651524553ac18c6291047698756 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea641f302f87bb8374a9da20c225390e01a5bc07da8c4f0a8ccfbaec7e38da67 +size 3519087 diff --git a/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ab0267e7f8316aeb170043f98125965c968e04b7 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a04141b29b49a61782942d654b55980d3af31bcb553d4957599b8ad1dd69ce3 +size 8155193 diff --git a/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4744b4e44231de40bdf5fd2db5a02c3150841f8a --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d7887d442aa31a741a99afe4df074c5f3473c93c61f8cf8d40f289b3bee3d94 +size 2856478 diff --git a/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d30db1bc61678a7087084a53c30bc4fad93baec0 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:34ba61efe0498830bcc2cde2060d469d9aa1c1d2e3968e6a7bf7761380c286c8 +size 3038430 diff --git a/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..379cfa1a7504821213f117ed77e9976623b88404 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8e11951dbf21d7a08fcba64d6913ccea6169c0f8da3e52d36e8a593c59435bc +size 317630 diff --git a/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..085adf460b9bd45bb871019ac2b18a7c4449a79b --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38770d24a3a9506c51d7a30dca403d5532098182380161ac6488c277830d771a +size 1148783 diff --git a/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b24f62fc65c88c2bae9ebeb77607780cb59be271 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81a0324554f0733f2407981c6b22b43d8fce19e80616aede580aa64a46a907b8 +size 3522457 diff --git a/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0155fc0acab3a5e6e00da4b5df23ab02c2f16839 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7f6be0ea6068d042d5b655588b15933c30387182552af7aa7285c06f0b65dc3 +size 8159101 diff --git a/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5ae80f28a8828b8096fd73285e80d38e516e1eb9 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9abd1876d7f19e3fe1135147ac702c751aaa143d7404a2cdde1aa74f0beaf35 +size 2861983 diff --git 
a/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..254c616ca0f14e51556cd5becc6da18546364ce3 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31f333cfb2a963c10fdd45973a766958a8fa1a4d0de9fd89302726a7fb0346ea +size 3039339 diff --git a/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7bd90699bf64e77c894c93fcbfd02735792958ea --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85c9df3a11017c6bab6daf0c6b3eb911a249a2ffea93ff1ccd77862a0862b687 +size 317462 diff --git a/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a4c01534c12b4dbb8e76232edec065109cfe29e1 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c1b4adf9ae1bfb2125e12bc3307363846994718e9cb7749e6539168f53e1d93 +size 1147757 diff --git a/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4cc8ff68f60fd57bcebb3aaafd489ae93e4766fc --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0477bedd21bfc8c8ecbca7d82294ebc58a7afed9f1039511425a4e80c8b61c1 +size 3527104 diff --git a/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a1270a1a461e133bab022489defad0fb3536abc7 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc82d3796974816725d460223170cffa715815e601f7951d366cadc261dad12f +size 8158341 diff --git a/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet 
b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cebfa5cc5582fc2f1c1b70590741fe6522baa37d --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e28573853da031d2d2f3543011962c646493d4b8f22b66d6bdbf04bb2d7c42d +size 2866853 diff --git a/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c8c558dfaa3f996536a7d71de4faf4a60c2276d8 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6807e916552ad623727753fc2ad2a6ceeed0e38cd70b204d1c0c30721411413b +size 3040552 diff --git a/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..898fcb52243ec5d4021828375183a75e71070743 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5581b303b94b3e8535cf4801acd99f76ef0037525ebb85c46eeb724f412a0c1d +size 317207 diff --git a/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..58e762d61b489c0c79e34ceb02bae25f83ca7007 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_linear_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfa1050a91c5b385da0e63e7484d91b438219e70f45fdffeef46336a71ac47cc +size 1155776 diff --git a/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e189e9e1839e8d0fafc42004d3c66cf8faab5a53 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b496cb0b0369f8808d9e49f7945766dedd8df561a912f2e75890225be5e954f +size 3524904 diff --git a/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 
100644 index 0000000000000000000000000000000000000000..3264dfeb23ace2d34fb44a2c8cc3393d238d4594 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98dd038f40188578569ad2516a6d4d49cbb28310aeaa255bbb91c460874e1499 +size 8167849 diff --git a/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2e477423d0c4a7c42791c97d896828ebb8384db4 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9b238c9750a1ff43768f6848e9068608b2bf87083cd67ca34f7296ce2861c96 +size 2865076 diff --git a/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a2d2311cdc1d1e1c374685c1865eb1d0a8316681 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba2366c223b52be8f5b3944f169cb1f4397c5877dafdf3d6edccbc89685ff08a +size 3039016 diff --git a/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..377b0ee3151a0578ef2ebc877f9a19e87fea3385 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5b75b643ce06441989db8a3965d237a581693fef89cc9767d17d53dfa7f7caa +size 316807 diff --git a/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..16e89695f1641047d2641a91c04376859940a8e3 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_1/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68594a2ed1de00007e692c4ac67b1f79646957cf181e790514be1805840d50ed +size 1151172 diff --git a/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d4b2f7f8aede3ce1dd49a0ac36059d1f1c8de2af --- /dev/null +++ 
b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78718e9459065844128aee99ba87d17d5e8f41239872399ccebe58f15af323be +size 3512561 diff --git a/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0af5964648f4a6daee96dab1143672da5df7aef2 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:004173edd314a70633c9494e3b6b750bee5b1550acc0c96f000a46dc64d1f4ed +size 8156053 diff --git a/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e324a734a6a71312059ddf0782656338900cdd7 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7936847895f16657f61be2ceef5b1019afdc3b6de1094e102ce4c191f4944099 +size 2867869 diff --git a/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f067eb986fd59dae28d45ac3d317d4169234d519 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22a6407a7a2c32f0a52c6651ae14320e2ab9e5e8d6e4ecb88e5c80b1eeffdcaa +size 3041915 diff --git a/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..43c36c1a01ef0ee7e8cd990891fc6f08179013a4 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:140630cd77a6c8164b132b5e1e6970bab8b59f033ba2e243cea3d3399cdb1395 +size 316757 diff --git a/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ed51a753bbad930ca57d4d4d5d4c3e70007e6f6 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_3/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8e7d929a5715de330d3d1d840bfaf92a504b253b6d3d3b6ecd7c7a825157e63c +size 1155589 diff --git a/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2c1ed199b7d1a02d6b92d235b26bc1e5b1f3dad6 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebdfbca69426ca53c6f84594449f2350fc5c691bf8a4f174a122be3ea9564728 +size 3518750 diff --git a/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9d8ab75c8989bcdda32469b7a09bd02c43d90b98 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:200456b465412b890d88d351051f6c378ea9f49113c009588e28b413db16c7d7 +size 8157534 diff --git a/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5c9505719d28753ba976d0074658a964a03043c5 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05b330afada56ad71670fabe7c9627c2942ad8ddc301134385afd8ec65b41114 +size 2867876 diff --git a/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f8bce90f1c4bca9fb25ecab4ea0936b0333d4863 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:983a4e91d97280ec70ae6a85179f2aa0bd706ffc4f3f0df4a403e09af7d3f09e +size 3037836 diff --git a/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5b9f7c1048ef8d1d91c1bb13da9abe9b0e3e9ef4 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:390b65080ab0f75471d579b3f267428f5c65b6d086b4fda7ee965f6d8dd5000c +size 316270 diff --git 
a/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..653021392cc71d5bd86db36497d4dfd47329fbeb --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_5/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f430f0b13b5498d1847bc0cb5f65fc4fd7b1eefc7458e32bc35e031f4c121cd5 +size 1150833 diff --git a/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c1944a6d5e8f166719c4aed5f7478ea9a1623d60 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4c7c7a1f74e447633b65b1b473bddfaf6a0a3936d0a3203a664aba270413e4d +size 3524687 diff --git a/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5b10be6fd02c3e8b2cf80ca4e8a47e34faf184a9 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f612a26caf8a9c171a4d7f8a077ba009225cec33be494abbd4363d473005240e +size 8156520 diff --git a/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..173b84187bb7127004bf73b82beb4838a917a0d8 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:394c3b6a803577972ce56149164afc5f1560141b8c1c181182c197a074caef3d +size 2859067 diff --git a/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e3244d55479190d3092f408ebc5c3e74441c8e54 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67e481acfba532ed435775c5af580319087e793c65426e8b3180c2793f95fd1b +size 3040283 diff --git a/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet 
b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4ef3e5040d4e8ef1cb6d8f10213e12d500b3ef23 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11357ce756a754f4bb7965d97df91850aa5d28c6d809c673ba01aff71eb31142 +size 317544 diff --git a/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a7a9d6fbbc4cf67e8883aba227b9a2aca4f5670d --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_7/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa36612833056f141e4a5ffb887167803666fd8d0378854cccde6154230ed0f1 +size 1151346 diff --git a/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a847828c11fcda8149a258ad0ca208cc3b1c4ea2 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a79f26aff3d170ae8d387312f5c6fa46f19bbb848ab18d24544078cb72855aad +size 3515086 diff --git a/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..81c01126842d8414ed34d47e2ae150b0d72977b1 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d17c1a0c5264637dc4282c5eef69e3106077551636cb07e6a6b265faa1b0ac6c +size 8154864 diff --git a/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5cd7b0fb2aa0bb2c184b3e50c028748595518d86 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bec3468bc42c20054a8f325f24fecb2497ad6cc87385ef8e3b02a6d8791d0e2e +size 2857379 diff --git a/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..98eae1e34fdd94be3c3614a0e39b8abc9e66932c --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:465807fc60ac00187a4a1a4e6a541798e6cff35a5c68d0edebd106f49030c5be +size 3039603 diff --git a/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..863b29bc6502659c312e5c7a8695483718c7d113 --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7efa20d17ae1c1bca06e4ad5286cdd7d8a02959b8c9be0c749aaa9011c603da5 +size 315829 diff --git a/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..005f7945a5c06add4000831047b6603cc334d86c --- /dev/null +++ b/merge_bench1/outputs/._merged2_phi_ties_9/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a749efe3c4abd30d0694d9e009ba4e330f8341c079cc5026a0fa1ab18a36730 +size 1148266 diff --git a/merge_bench1/results/._merged2_phi_darelinear_1/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_darelinear_1/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..43cef5a69254351539736b444bb630826e7fa6a8 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_darelinear_1/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.528125, + "sem_stderr": 0.027950302087016623 + }, + "mm|truthfulqa|0": { + "sem": 0.34710743801652894, + "sem_stderr": 0.04345724570292534 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.75, + "math_pass@1:1_samples_stderr": 0.06933752452815363 + }, + "mm|arc_challenge|0": { + "sem": 0.5853018372703412, + "sem_stderr": 0.025273430846095755 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7203579418344519, + "math_pass@1:1_samples_stderr": 0.021252400416809914 + }, + "mm|arc_easy|0": { + "sem": 0.6409714889123548, + "sem_stderr": 0.015596898893325599 + }, + "all": { + "sem": 0.5253764410498063, + "sem_stderr": 0.028069469382340832, + "math_pass@1:1_samples": 0.735178970917226, + "math_pass@1:1_samples_stderr": 0.04529496247248177 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_darelinear_3/results_2025-06-23T01-52-10.258150.json 
b/merge_bench1/results/._merged2_phi_darelinear_3/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..203fe48f62a594b5e0febfaaf46b8a53a219d65a --- /dev/null +++ b/merge_bench1/results/._merged2_phi_darelinear_3/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.49375, + "sem_stderr": 0.027992438382232313 + }, + "mm|truthfulqa|0": { + "sem": 0.38016528925619836, + "sem_stderr": 0.04431324501968431 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.060843430844447585 + }, + "mm|arc_challenge|0": { + "sem": 0.5853018372703412, + "sem_stderr": 0.025273430846095745 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7494407158836689, + "math_pass@1:1_samples_stderr": 0.020519012798668794 + }, + "mm|arc_easy|0": { + "sem": 0.6399155227032735, + "sem_stderr": 0.015606946957923144 + }, + "all": { + "sem": 0.5247831623074533, + "sem_stderr": 0.028296515301483877, + "math_pass@1:1_samples": 0.7872203579418344, + "math_pass@1:1_samples_stderr": 0.040681221821558186 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_darelinear_5/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_darelinear_5/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..c6bdb91a644b512970ce8dc919537028f1fbd3ee --- /dev/null +++ b/merge_bench1/results/._merged2_phi_darelinear_5/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.534375, + "sem_stderr": 0.02792838801246723 + }, + "mm|truthfulqa|0": { + "sem": 0.32231404958677684, + "sem_stderr": 0.04266416363352168 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203487 + }, + "mm|arc_challenge|0": { + "sem": 0.6089238845144357, + "sem_stderr": 0.025033429616703247 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7315436241610739, + "math_pass@1:1_samples_stderr": 0.020984061328633327 + }, + "mm|arc_easy|0": { + "sem": 0.6166842661034847, + "sem_stderr": 0.015807536339366567 + }, + "all": { + "sem": 0.5205743000511743, + "sem_stderr": 0.02785837940051468, + "math_pass@1:1_samples": 0.765771812080537, + "math_pass@1:1_samples_stderr": 0.0425176614253341 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_darelinear_7/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_darelinear_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..fd3b1df88ebc7c86efbf995a36e787743ace6d08 --- /dev/null +++ 
b/merge_bench1/results/._merged2_phi_darelinear_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.53125, + "sem_stderr": 0.0279398950447155 + }, + "mm|truthfulqa|0": { + "sem": 0.2892561983471074, + "sem_stderr": 0.04139112727635463 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.75, + "math_pass@1:1_samples_stderr": 0.06933752452815363 + }, + "mm|arc_challenge|0": { + "sem": 0.6272965879265092, + "sem_stderr": 0.024804264208008724 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7404921700223713, + "math_pass@1:1_samples_stderr": 0.020757165068944216 + }, + "mm|arc_easy|0": { + "sem": 0.6230200633579726, + "sem_stderr": 0.015756674615478247 + }, + "all": { + "sem": 0.5177057124078973, + "sem_stderr": 0.027472990286139278, + "math_pass@1:1_samples": 0.7452460850111857, + "math_pass@1:1_samples_stderr": 0.045047344798548924 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_darelinear_9/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_darelinear_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..577fe8c4703c128e571c48af7db738a471d2afb8 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_darelinear_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.51875, + "sem_stderr": 0.0279749349017763 + }, + "mm|truthfulqa|0": { + "sem": 0.35537190082644626, + "sem_stderr": 0.04369236326573981 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.6272965879265092, + "sem_stderr": 0.024804264208008728 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7404921700223713, + "math_pass@1:1_samples_stderr": 0.020757165068944216 + }, + "mm|arc_easy|0": { + "sem": 0.6240760295670539, + "sem_stderr": 0.015747919721903637 + }, + "all": { + "sem": 0.5313736295800023, + "sem_stderr": 0.028054870524357116, + "math_pass@1:1_samples": 0.7952460850111857, + "math_pass@1:1_samples_stderr": 0.03896717627931538 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_linear_1/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_linear_1/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..40c6a1314de12256c14b3979463a0706b3c043a6 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_linear_1/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.478125, + "sem_stderr": 0.027967820983765136 + }, + "mm|truthfulqa|0": { + 
"sem": 0.33884297520661155, + "sem_stderr": 0.0432076780753667 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.5695538057742782, + "sem_stderr": 0.025400076051601553 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7472035794183445, + "math_pass@1:1_samples_stderr": 0.0205796271719225 + }, + "mm|arc_easy|0": { + "sem": 0.6462513199577613, + "sem_stderr": 0.015545423839085257 + }, + "all": { + "sem": 0.5081932752346627, + "sem_stderr": 0.028030249737454663, + "math_pass@1:1_samples": 0.7986017897091722, + "math_pass@1:1_samples_stderr": 0.038878407330804526 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_linear_3/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_linear_3/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..07aa2bbdab2e727bd78a117ce38a7085113b302e --- /dev/null +++ b/merge_bench1/results/._merged2_phi_linear_3/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.528125, + "sem_stderr": 0.02795030208701662 + }, + "mm|truthfulqa|0": { + "sem": 0.33884297520661155, + "sem_stderr": 0.04320767807536671 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.6089238845144357, + "sem_stderr": 0.025033429616703247 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7315436241610739, + "math_pass@1:1_samples_stderr": 0.02098406132863331 + }, + "mm|arc_easy|0": { + "sem": 0.6261879619852164, + "sem_stderr": 0.01573017051830343 + }, + "all": { + "sem": 0.5255199554265659, + "sem_stderr": 0.027980395074347502, + "math_pass@1:1_samples": 0.7907718120805369, + "math_pass@1:1_samples_stderr": 0.03908062440915993 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_linear_5/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_linear_5/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..c3ee4a505e1e87aca33b53767f05702a67abf44f --- /dev/null +++ b/merge_bench1/results/._merged2_phi_linear_5/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.51875, + "sem_stderr": 0.027974934901776313 + }, + "mm|truthfulqa|0": { + "sem": 0.34710743801652894, + "sem_stderr": 0.04345724570292535 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.6272965879265092, + 
"sem_stderr": 0.024804264208008724 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.70917225950783, + "math_pass@1:1_samples_stderr": 0.021504351726262055 + }, + "mm|arc_easy|0": { + "sem": 0.6293558606124604, + "sem_stderr": 0.01570294613303807 + }, + "all": { + "sem": 0.5306274716388746, + "sem_stderr": 0.027984847736437113, + "math_pass@1:1_samples": 0.7795861297539151, + "math_pass@1:1_samples_stderr": 0.03934076960797431 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_linear_7/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_linear_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..78807a0f931650111712574f5df27adeb35c1ad1 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_linear_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.503125, + "sem_stderr": 0.02799407877242281 + }, + "mm|truthfulqa|0": { + "sem": 0.35537190082644626, + "sem_stderr": 0.04369236326573981 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203487 + }, + "mm|arc_challenge|0": { + "sem": 0.6299212598425197, + "sem_stderr": 0.024768425689985787 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7002237136465325, + "math_pass@1:1_samples_stderr": 0.021694498511850818 + }, + "mm|arc_easy|0": { + "sem": 0.6282998944033791, + "sem_stderr": 0.01571210118921751 + }, + "all": { + "sem": 0.5291795137680863, + "sem_stderr": 0.028041742229341478, + "math_pass@1:1_samples": 0.7501118568232663, + "math_pass@1:1_samples_stderr": 0.042872880016942845 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_linear_9/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_linear_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..4afa49780f192223f23d4b5af6a6adfd05ae44d9 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_linear_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.5, + "sem_stderr": 0.027994625547792713 + }, + "mm|truthfulqa|0": { + "sem": 0.3140495867768595, + "sem_stderr": 0.04236964753041017 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.75, + "math_pass@1:1_samples_stderr": 0.06933752452815363 + }, + "mm|arc_challenge|0": { + "sem": 0.6220472440944882, + "sem_stderr": 0.024873599945204095 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.727069351230425, + "math_pass@1:1_samples_stderr": 0.02109340266931674 + }, + "mm|arc_easy|0": { + "sem": 0.6240760295670539, + "sem_stderr": 0.015747919721903637 + }, 
+ "all": { + "sem": 0.5150432151096004, + "sem_stderr": 0.02774644818632765, + "math_pass@1:1_samples": 0.7385346756152125, + "math_pass@1:1_samples_stderr": 0.045215463598735184 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_ties_1/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_ties_1/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..4f1891e7264ca733b81384e1b5877729bd600f10 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_ties_1/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.515625, + "sem_stderr": 0.027980952958187033 + }, + "mm|truthfulqa|0": { + "sem": 0.3140495867768595, + "sem_stderr": 0.04236964753041017 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.8, + "math_pass@1:1_samples_stderr": 0.06405126152203486 + }, + "mm|arc_challenge|0": { + "sem": 0.6246719160104987, + "sem_stderr": 0.024839321191582626 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7337807606263982, + "math_pass@1:1_samples_stderr": 0.02092837218333681 + }, + "mm|arc_easy|0": { + "sem": 0.614572333685322, + "sem_stderr": 0.015823858012624415 + }, + "all": { + "sem": 0.51722970911817, + "sem_stderr": 0.02775344492320106, + "math_pass@1:1_samples": 0.7668903803131991, + "math_pass@1:1_samples_stderr": 0.04248981685268584 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_ties_3/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_ties_3/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..017d2bd51dbe38b29c751b154f55aab9734949de --- /dev/null +++ b/merge_bench1/results/._merged2_phi_ties_3/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.51875, + "sem_stderr": 0.0279749349017763 + }, + "mm|truthfulqa|0": { + "sem": 0.3140495867768595, + "sem_stderr": 0.04236964753041018 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.775, + "math_pass@1:1_samples_stderr": 0.06686668711812967 + }, + "mm|arc_challenge|0": { + "sem": 0.5774278215223098, + "sem_stderr": 0.02534005215663172 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.756152125279642, + "math_pass@1:1_samples_stderr": 0.02033277461576782 + }, + "mm|arc_easy|0": { + "sem": 0.6240760295670539, + "sem_stderr": 0.015747919721903637 + }, + "all": { + "sem": 0.5085758594665558, + "sem_stderr": 0.027858138577680457, + "math_pass@1:1_samples": 0.765576062639821, + "math_pass@1:1_samples_stderr": 0.043599730866948745 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 
0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_ties_5/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_ties_5/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..fcddba35102cb99fe158fd34a62e6ae14e439446 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_ties_5/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.496875, + "sem_stderr": 0.02799407877242281 + }, + "mm|truthfulqa|0": { + "sem": 0.35537190082644626, + "sem_stderr": 0.04369236326573981 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.7, + "math_pass@1:1_samples_stderr": 0.07337993857053426 + }, + "mm|arc_challenge|0": { + "sem": 0.5958005249343832, + "sem_stderr": 0.025174248508215997 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7427293064876958, + "math_pass@1:1_samples_stderr": 0.02069869711525502 + }, + "mm|arc_easy|0": { + "sem": 0.614572333685322, + "sem_stderr": 0.01582385801262442 + }, + "all": { + "sem": 0.5156549398615379, + "sem_stderr": 0.02817113713975076, + "math_pass@1:1_samples": 0.7213646532438478, + "math_pass@1:1_samples_stderr": 0.04703931784289464 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_ties_7/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_ties_7/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..bfff6e3d7c96807b5a7e1748c7511803a59dcb94 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_ties_7/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.50625, + "sem_stderr": 0.027992438382232313 + }, + "mm|truthfulqa|0": { + "sem": 0.32231404958677684, + "sem_stderr": 0.04266416363352167 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.85, + "math_pass@1:1_samples_stderr": 0.05717718748968655 + }, + "mm|arc_challenge|0": { + "sem": 0.6272965879265092, + "sem_stderr": 0.024804264208008724 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.727069351230425, + "math_pass@1:1_samples_stderr": 0.021093402669316762 + }, + "mm|arc_easy|0": { + "sem": 0.6198521647307286, + "sem_stderr": 0.01578246205338556 + }, + "all": { + "sem": 0.5189282005610036, + "sem_stderr": 0.027810832069287066, + "math_pass@1:1_samples": 0.7885346756152125, + "math_pass@1:1_samples_stderr": 0.039135295079501656 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 
381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/merge_bench1/results/._merged2_phi_ties_9/results_2025-06-23T01-52-10.258150.json b/merge_bench1/results/._merged2_phi_ties_9/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..b1baacb311ddfe2270812238ed309dbf1504bd69 --- /dev/null +++ b/merge_bench1/results/._merged2_phi_ties_9/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.490625, + "sem_stderr": 0.02798970418494101 + }, + "mm|truthfulqa|0": { + "sem": 0.32231404958677684, + "sem_stderr": 0.04266416363352167 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.060843430844447585 + }, + "mm|arc_challenge|0": { + "sem": 0.6299212598425197, + "sem_stderr": 0.024768425689985787 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7449664429530202, + "math_pass@1:1_samples_stderr": 0.020639519855874682 + }, + "mm|arc_easy|0": { + "sem": 0.6304118268215417, + "sem_stderr": 0.01569371062889389 + }, + "all": { + "sem": 0.5183180340627096, + "sem_stderr": 0.02777900103433559, + "math_pass@1:1_samples": 0.78498322147651, + "math_pass@1:1_samples_stderr": 0.040741475350161135 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/logs/I-Phi4.log b/test/0-1k/logs/I-Phi4.log new file mode 100644 index 0000000000000000000000000000000000000000..4cf9ca50f7807034371f8d2733997843b460a4d3 --- /dev/null +++ b/test/0-1k/logs/I-Phi4.log @@ -0,0 +1,13 @@ +INFO 06-27 00:01:57 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-27 00:01:57 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |math_pass@1:1_samples|0.9676|± |0.0058| +| | |sem |0.8825|± |0.0188| +|mm\|arc_challenge\|0| 0|sem |0.9501|± |0.0112| +|mm\|arc_easy\|0 | 0|sem |0.9768|± |0.0049| +|mm\|commonsenseqa\|0| 0|sem |0.8344|± |0.0208| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9351|± |0.0117| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000| +|mm\|truthfulqa\|0 | 0|sem |0.7686|± |0.0385| + diff --git a/test/0-1k/logs/M-Phi4-10L.log b/test/0-1k/logs/M-Phi4-10L.log new file mode 100644 index 0000000000000000000000000000000000000000..a1655a76cbf4e67f28c561cfcbca7448472cc0d9 --- /dev/null +++ b/test/0-1k/logs/M-Phi4-10L.log @@ -0,0 +1,79 @@ +INFO 06-26 16:17:11 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 16:17:13 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-26 16:17:20 [config.py:717] This model supports multiple tasks: {'reward', 'embed', 'generate', 'classify', 'score'}. Defaulting to 'generate'. +INFO 06-26 16:17:20 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-26 16:17:20 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
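For reference, the `all` block in these results files appears to be the unweighted mean of each metric over the tasks that report it: `sem` averaged over the MCQ tasks present, `math_pass@1:1_samples` over the math tasks. A minimal sketch (the path is illustrative) that reproduces it from one of the JSON files above:

```python
import json
from statistics import mean

def aggregate(path):
    # Recompute the "all" block as an unweighted macro-average per metric.
    results = json.load(open(path))["results"]
    per_task = [v for k, v in results.items() if k != "all"]
    return {
        metric: mean(t[metric] for t in per_task if metric in t)
        for metric in ("sem", "math_pass@1:1_samples")
    }

# For ._merged2_phi_ties_1 this yields sem = 0.51722970911817 and
# math_pass@1:1_samples = 0.7668903803131991, matching its "all" block.
print(aggregate("merge_bench1/results/._merged2_phi_ties_1/results_2025-06-23T01-52-10.258150.json"))
```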
+INFO 06-26 16:17:21 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-26 16:17:21 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
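The `Initializing a V1 LLM engine` line above records the effective vLLM config for these runs. A minimal sketch of an equivalent offline launch, assuming the vLLM 0.8.x API (the logged `max_seq_len` corresponds to the `max_model_len` constructor argument, and the prompt is only a placeholder):

```python
from vllm import LLM, SamplingParams

# Mirrors the logged settings: bf16 weights, TP=2, 2k context,
# prefix caching enabled; everything else is left at its default.
llm = LLM(
    model="./models/R-Phi4",
    tensor_parallel_size=2,
    dtype="bfloat16",
    max_model_len=2048,
    enable_prefix_caching=True,
)
out = llm.generate(["1 + 1 ="], SamplingParams(max_tokens=16))
print(out[0].outputs[0].text)
```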
+INFO 06-26 16:17:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1], buffer_handle=(2, 10485760, 10, 'psm_f688aea9'), local_subscribe_addr='ipc:///tmp/f4d1e1b5-3b7d-443f-aaa6-191bb03e60eb', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 16:17:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_38222f3a'), local_subscribe_addr='ipc:///tmp/61c75660-6040-4301-be09-daf91851aceb', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 16:17:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_b84aa908'), local_subscribe_addr='ipc:///tmp/861bfec8-e610-40bf-b4f7-41be6bb13a7a', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:23 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:23 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:23 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:23 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:23 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:23 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_0b17bfa4'), local_subscribe_addr='ipc:///tmp/553f952a-a2ac-4fd1-9c22-d92575ca5e0c', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:23 [parallel_state.py:1004] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:23 [parallel_state.py:1004] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3061950) WARNING 06-26 16:17:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3061949) WARNING 06-26 16:17:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:25 [loader.py:458] Loading weights took 1.07 seconds +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:25 [loader.py:458] Loading weights took 1.11 seconds +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:25 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.266110 seconds +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:25 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.299667 seconds +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:31 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:31 [backends.py:430] Dynamo bytecode transform time: 5.76 s +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:31 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:31 [backends.py:430] Dynamo bytecode transform time: 5.86 s +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:35 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:35 [backends.py:136] Cache the graph of shape None for later use +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:17:56 [backends.py:148] Compiling a graph for general shape takes 24.57 s +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:17:56 [backends.py:148] Compiling a graph for general shape takes 24.81 s +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:18:18 [monitor.py:33] torch.compile takes 30.67 s in total +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:18:18 [monitor.py:33] torch.compile takes 30.33 s in total +INFO 06-26 16:18:19 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 16:18:19 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +INFO 06-26 16:18:19 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 16:18:19 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:18:43 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:18:43 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=1 pid=3061950) INFO 06-26 16:18:43 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.36 GiB +(VllmWorker rank=0 pid=3061949) INFO 06-26 16:18:43 [gpu_model_runner.py:1686] Graph capturing finished in 24 secs, took 2.36 GiB +INFO 06-26 16:18:43 [core.py:159] init engine (profile, create kv cache, warmup model) took 78.38 seconds +INFO 06-26 16:18:44 [core_client.py:439] Core engine process 0 ready. +INFO 06-26 16:33:12 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 16:33:12 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 17:52:20 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 17:52:20 [__init__.py:239] Automatically detected platform cuda. 
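The `Maximum concurrency` figure logged above is simply the KV-cache capacity divided by the per-request context length:

```python
# 973,584 cached tokens / 2,048 tokens per request
print(973_584 / 2_048)  # 475.3828125, logged as "475.38x"
```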
+| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5057|± |0.0281| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.5643|± |0.0254| +|mm\|arc_easy\|0 | 0|sem |0.6030|± |0.0159| +|mm\|commonsenseqa\|0| 0|sem |0.5250|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429| + +INFO 06-26 21:26:07 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 21:26:07 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.6510|± |0.0268| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.7507|± |0.0222| +|mm\|arc_easy\|0 | 0|sem |0.8163|± |0.0126| +|mm\|commonsenseqa\|0| 0|sem |0.6156|± |0.0272| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.4215|± |0.0451| + diff --git a/test/0-1k/logs/M-Phi4-10M.log b/test/0-1k/logs/M-Phi4-10M.log new file mode 100644 index 0000000000000000000000000000000000000000..1496020840a7782d66a26e7d9e25cc70a911c5cd --- /dev/null +++ b/test/0-1k/logs/M-Phi4-10M.log @@ -0,0 +1,77 @@ +INFO 06-26 16:33:11 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 16:33:12 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-26 16:33:19 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'classify', 'reward', 'generate'}. Defaulting to 'generate'. +INFO 06-26 16:33:19 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-26 16:33:19 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
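The `Stderr` columns in these tables (and the `*_stderr` fields in the JSON results above) are consistent with the standard error of a mean of per-sample 0/1 scores using the sample (n-1) variance, with n taken from the `size` block:

```python
import math

def stderr(p, n):
    # Standard error of a mean of binary scores, sample (n - 1) variance.
    return math.sqrt(p * (1 - p) / (n - 1))

# truthfulqa in ._merged2_phi_ties_1: value 0.3140495867768595, n = 121
print(stderr(0.3140495867768595, 121))  # 0.04236964753041017, as reported
```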
+INFO 06-26 16:33:21 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-26 16:33:21 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-26 16:33:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1], buffer_handle=(2, 10485760, 10, 'psm_c2855650'), local_subscribe_addr='ipc:///tmp/e7f16b78-5da0-4629-964a-90ce4ee47bc4', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 16:33:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_05cc1b4a'), local_subscribe_addr='ipc:///tmp/05d9a8bc-7d4f-4474-939e-c2cf11f05605', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 16:33:21 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:21 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_1fce3b06'), local_subscribe_addr='ipc:///tmp/2fb205ae-e270-4df1-b9ea-72cf2c9211d4', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:22 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:22 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:22 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:22 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:23 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:23 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:23 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_f1737078'), local_subscribe_addr='ipc:///tmp/ed4c9e86-990c-4cef-8e4f-18b180f6ce58', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:23 [parallel_state.py:1004] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:23 [parallel_state.py:1004] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:23 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3071957) WARNING 06-26 16:33:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3071952) WARNING 06-26 16:33:23 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:23 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:24 [loader.py:458] Loading weights took 1.04 seconds +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:24 [loader.py:458] Loading weights took 1.11 seconds +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:24 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.224086 seconds +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:24 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.287656 seconds +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:30 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:30 [backends.py:430] Dynamo bytecode transform time: 5.69 s +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:30 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:30 [backends.py:430] Dynamo bytecode transform time: 5.76 s +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:35 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.322 s +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:35 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.408 s +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:33:40 [monitor.py:33] torch.compile takes 5.76 s in total +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:33:40 [monitor.py:33] torch.compile takes 5.69 s in total +INFO 06-26 16:33:41 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 16:33:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +INFO 06-26 16:33:41 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 16:33:41 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:34:01 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:34:01 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=0 pid=3071952) INFO 06-26 16:34:01 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB +(VllmWorker rank=1 pid=3071957) INFO 06-26 16:34:01 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB +INFO 06-26 16:34:02 [core.py:159] init engine (profile, create kv cache, warmup model) took 37.17 seconds +INFO 06-26 16:34:02 [core_client.py:439] Core engine process 0 ready. +INFO 06-26 16:48:30 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 16:48:30 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 17:52:20 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 17:52:20 [__init__.py:239] Automatically detected platform cuda. 
+| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5057|± |0.0281| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.5643|± |0.0254| +|mm\|arc_easy\|0 | 0|sem |0.6030|± |0.0159| +|mm\|commonsenseqa\|0| 0|sem |0.5250|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429| + +INFO 06-26 21:26:07 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 21:26:07 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.6510|± |0.0268| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.7507|± |0.0222| +|mm\|arc_easy\|0 | 0|sem |0.8163|± |0.0126| +|mm\|commonsenseqa\|0| 0|sem |0.6156|± |0.0272| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.4215|± |0.0451| + diff --git a/test/0-1k/logs/M-Phi4-5L.log b/test/0-1k/logs/M-Phi4-5L.log new file mode 100644 index 0000000000000000000000000000000000000000..5b7ab140bfa5b2031fdbdbd8dc88ccdb9a3d3281 --- /dev/null +++ b/test/0-1k/logs/M-Phi4-5L.log @@ -0,0 +1,77 @@ +INFO 06-26 16:48:29 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 16:48:30 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-26 16:48:37 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'reward', 'generate', 'classify'}. Defaulting to 'generate'. +INFO 06-26 16:48:37 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-26 16:48:37 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
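Since the same table format repeats in every log below, a small hypothetical parser (`parse_table` is not part of the harness) may help when comparing runs. Note that pipes inside task names are escaped as `\|` in the logs, so rows must be split on unescaped pipes only:

```python
import re

def parse_table(text):
    """Parse a log table into {(task, metric): (value, stderr)}."""
    scores, task = {}, None
    for line in text.splitlines():
        if not line.startswith("|") or line.startswith(("| Task", "|---")):
            continue  # skip non-table, header, and separator lines
        cells = [c.strip().replace("\\|", "|")
                 for c in re.split(r"(?<!\\)\|", line)[1:-1]]
        task = cells[0] or task  # continuation rows leave the Task cell blank
        scores[(task, cells[2])] = (float(cells[3]), float(cells[5]))
    return scores
```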
+INFO 06-26 16:48:39 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-26 16:48:39 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-26 16:48:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1], buffer_handle=(2, 10485760, 10, 'psm_d2853ce3'), local_subscribe_addr='ipc:///tmp/0c180f2a-1f59-4fe7-b47e-22d1506f88ad', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 16:48:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_2108deac'), local_subscribe_addr='ipc:///tmp/f3f3844b-3b9a-4fcb-9c6b-5009ffaa7bc8', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 16:48:39 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:39 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_a65e3302'), local_subscribe_addr='ipc:///tmp/1a19024a-4b16-4fdb-843b-66a6520e43ff', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:40 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:40 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:40 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:40 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:41 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:41 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:41 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_b23c2cc2'), local_subscribe_addr='ipc:///tmp/845c7fbb-71af-49bf-9565-67c7f1e78258', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:41 [parallel_state.py:1004] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:41 [parallel_state.py:1004] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:41 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:41 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3081128) WARNING 06-26 16:48:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3081129) WARNING 06-26 16:48:41 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:41 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:41 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:42 [loader.py:458] Loading weights took 1.11 seconds +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:42 [loader.py:458] Loading weights took 1.15 seconds +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:42 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.291076 seconds +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:42 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.338657 seconds +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:48 [backends.py:430] Dynamo bytecode transform time: 5.66 s +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:48 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:48 [backends.py:430] Dynamo bytecode transform time: 5.69 s +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:53 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.369 s +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:53 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.438 s +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:48:59 [monitor.py:33] torch.compile takes 5.69 s in total +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:48:59 [monitor.py:33] torch.compile takes 5.66 s in total +INFO 06-26 16:48:59 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 16:48:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +INFO 06-26 16:48:59 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 16:48:59 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:49:19 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:49:19 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=1 pid=3081129) INFO 06-26 16:49:19 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB +(VllmWorker rank=0 pid=3081128) INFO 06-26 16:49:19 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB +INFO 06-26 16:49:20 [core.py:159] init engine (profile, create kv cache, warmup model) took 37.03 seconds +INFO 06-26 16:49:20 [core_client.py:439] Core engine process 0 ready. +INFO 06-26 17:03:47 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 17:03:47 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 17:52:20 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 17:52:20 [__init__.py:239] Automatically detected platform cuda. 
+| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5065|± |0.0281| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.5643|± |0.0254| +|mm\|arc_easy\|0 | 0|sem |0.6061|± |0.0159| +|mm\|commonsenseqa\|0| 0|sem |0.5250|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429| + +INFO 06-26 21:26:07 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 21:26:07 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.6521|± |0.0267| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.7507|± |0.0222| +|mm\|arc_easy\|0 | 0|sem |0.8205|± |0.0125| +|mm\|commonsenseqa\|0| 0|sem |0.6156|± |0.0272| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.4215|± |0.0451| + diff --git a/test/0-1k/logs/M-Phi4-5M.log b/test/0-1k/logs/M-Phi4-5M.log new file mode 100644 index 0000000000000000000000000000000000000000..081880e52299badd2ab8df2276441ab7b56eb779 --- /dev/null +++ b/test/0-1k/logs/M-Phi4-5M.log @@ -0,0 +1,77 @@ +INFO 06-26 17:03:46 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 17:03:47 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-26 17:03:54 [config.py:717] This model supports multiple tasks: {'score', 'generate', 'reward', 'embed', 'classify'}. Defaulting to 'generate'. +INFO 06-26 17:03:55 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-26 17:03:55 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-26 17:03:56 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-26 17:03:56 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-26 17:03:56 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1], buffer_handle=(2, 10485760, 10, 'psm_2ccaed9a'), local_subscribe_addr='ipc:///tmp/579adb72-1bf2-4e4a-ad42-53ba807b6361', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 17:03:56 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:56 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_4ec9bfa9'), local_subscribe_addr='ipc:///tmp/45411365-6d30-49c6-a3ea-ef4acaad1ece', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 17:03:56 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:56 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_d1abe504'), local_subscribe_addr='ipc:///tmp/7e173fb2-2879-4ea4-9b47-fc38d21e35f9', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:58 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:58 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:58 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:58 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:58 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:58 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:58 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_02b1d087'), local_subscribe_addr='ipc:///tmp/e9df8d3f-04e7-4747-b9ca-b38e258aeadd', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:58 [parallel_state.py:1004] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:58 [parallel_state.py:1004] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:58 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:58 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3089993) WARNING 06-26 17:03:58 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3089992) WARNING 06-26 17:03:58 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:58 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:58 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:03:59 [loader.py:458] Loading weights took 1.10 seconds +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:03:59 [loader.py:458] Loading weights took 1.14 seconds +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:04:00 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.286406 seconds +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:04:00 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.325404 seconds +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:04:05 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:04:05 [backends.py:430] Dynamo bytecode transform time: 5.76 s +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:04:05 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:04:05 [backends.py:430] Dynamo bytecode transform time: 5.79 s +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:04:10 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.382 s +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:04:10 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.415 s +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:04:16 [monitor.py:33] torch.compile takes 5.79 s in total +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:04:16 [monitor.py:33] torch.compile takes 5.76 s in total +INFO 06-26 17:04:17 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 17:04:17 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +INFO 06-26 17:04:17 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 17:04:17 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:04:37 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:04:37 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=1 pid=3089993) INFO 06-26 17:04:37 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB +(VllmWorker rank=0 pid=3089992) INFO 06-26 17:04:37 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB +INFO 06-26 17:04:37 [core.py:159] init engine (profile, create kv cache, warmup model) took 37.42 seconds +INFO 06-26 17:04:37 [core_client.py:439] Core engine process 0 ready. +INFO 06-26 17:19:05 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 17:19:05 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 17:52:20 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 17:52:20 [__init__.py:239] Automatically detected platform cuda. 
+| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.5057|± |0.0281| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.5643|± |0.0254| +|mm\|arc_easy\|0 | 0|sem |0.6030|± |0.0159| +|mm\|commonsenseqa\|0| 0|sem |0.5250|± |0.0280| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.3306|± |0.0429| + +INFO 06-26 21:26:07 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 21:26:07 [__init__.py:239] Automatically detected platform cuda. +| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.6510|± |0.0268| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.7507|± |0.0222| +|mm\|arc_easy\|0 | 0|sem |0.8163|± |0.0126| +|mm\|commonsenseqa\|0| 0|sem |0.6156|± |0.0272| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.4215|± |0.0451| + diff --git a/test/0-1k/logs/M1-Phi4-10L.log b/test/0-1k/logs/M1-Phi4-10L.log new file mode 100644 index 0000000000000000000000000000000000000000..a558c2c3d8bb83bdb103f8bc0ef1b89b52d4eea2 --- /dev/null +++ b/test/0-1k/logs/M1-Phi4-10L.log @@ -0,0 +1,62 @@ +INFO 06-26 23:05:03 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 23:05:04 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-26 23:05:11 [config.py:717] This model supports multiple tasks: {'embed', 'score', 'classify', 'reward', 'generate'}. Defaulting to 'generate'. +INFO 06-26 23:05:11 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-26 23:05:11 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. 
+INFO 06-26 23:05:13 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-26 23:05:13 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-26 23:05:13 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1], buffer_handle=(2, 10485760, 10, 'psm_4bc251f4'), local_subscribe_addr='ipc:///tmp/029ff08f-8d9a-4a9a-8e83-5ba2cff44fed', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 23:05:13 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:13 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_604227c5'), local_subscribe_addr='ipc:///tmp/204ccc90-c5ac-4045-a2f5-99eeda6dfef3', remote_subscribe_addr=None, remote_addr_ipv6=False) +WARNING 06-26 23:05:13 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:13 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_13441b8c'), local_subscribe_addr='ipc:///tmp/a7f4844b-c70e-4dce-986c-a53e88315449', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:15 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:15 [utils.py:1055] Found nccl from library libnccl.so.2 +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:15 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:15 [pynccl.py:69] vLLM is using nccl==2.21.5 +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:15 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:15 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:15 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_3e011a61'), local_subscribe_addr='ipc:///tmp/ec95a330-c2a8-48f6-b9fa-4e8e8518ff5e', remote_subscribe_addr=None, remote_addr_ipv6=False) +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:15 [parallel_state.py:1004] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1 +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:15 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=1 pid=3267058) WARNING 06-26 23:05:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:15 [parallel_state.py:1004] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0 +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:15 [cuda.py:221] Using Flash Attention backend on V1 engine. +(VllmWorker rank=0 pid=3267056) WARNING 06-26 23:05:15 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer. +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:15 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... 
+(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:15 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4... +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:16 [loader.py:458] Loading weights took 1.11 seconds +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:17 [loader.py:458] Loading weights took 1.16 seconds +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:17 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.289089 seconds +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:17 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.336661 seconds +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:22 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_1_0 for vLLM's torch.compile +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:22 [backends.py:430] Dynamo bytecode transform time: 5.67 s +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:23 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_0_0 for vLLM's torch.compile +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:23 [backends.py:430] Dynamo bytecode transform time: 5.78 s +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:28 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.374 s +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:28 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.417 s +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:33 [monitor.py:33] torch.compile takes 5.78 s in total +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:33 [monitor.py:33] torch.compile takes 5.67 s in total +INFO 06-26 23:05:34 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 23:05:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +INFO 06-26 23:05:34 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens +INFO 06-26 23:05:34 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:54 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:55 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses +(VllmWorker rank=1 pid=3267058) INFO 06-26 23:05:55 [gpu_model_runner.py:1686] Graph capturing finished in 21 secs, took 2.36 GiB +(VllmWorker rank=0 pid=3267056) INFO 06-26 23:05:55 [gpu_model_runner.py:1686] Graph capturing finished in 21 secs, took 2.36 GiB +INFO 06-26 23:05:55 [core.py:159] init engine (profile, create kv cache, warmup model) took 37.83 seconds +INFO 06-26 23:05:55 [core_client.py:439] Core engine process 0 ready. +INFO 06-26 23:20:20 [importing.py:53] Triton module has been replaced with a placeholder. +INFO 06-26 23:20:20 [__init__.py:239] Automatically detected platform cuda. 
+| Task |Version| Metric |Value | |Stderr| +|------------------|------:|---------------------|-----:|---|-----:| +|all | |sem |0.6523|± |0.0267| +| | |math_pass@1:1_samples|0.7895|± |0.0406| +|mm\|arc_challenge\|0| 0|sem |0.7507|± |0.0222| +|mm\|arc_easy\|0 | 0|sem |0.8215|± |0.0124| +|mm\|commonsenseqa\|0| 0|sem |0.6156|± |0.0272| +|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204| +|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608| +|mm\|truthfulqa\|0 | 0|sem |0.4215|± |0.0451| + diff --git a/test/0-1k/logs/M1-Phi4-10M.log b/test/0-1k/logs/M1-Phi4-10M.log new file mode 100644 index 0000000000000000000000000000000000000000..9172aa05fed75b2af8f22cdb68820afd65372070 --- /dev/null +++ b/test/0-1k/logs/M1-Phi4-10M.log @@ -0,0 +1,62 @@ +INFO 06-26 23:20:19 [__init__.py:239] Automatically detected platform cuda. +INFO 06-26 23:20:20 [config.py:209] Replacing legacy 'type' key with 'rope_type' +INFO 06-26 23:20:27 [config.py:717] This model supports multiple tasks: {'score', 'reward', 'generate', 'embed', 'classify'}. Defaulting to 'generate'. +INFO 06-26 23:20:28 [config.py:1770] Defaulting to use mp for distributed inference +INFO 06-26 23:20:28 [config.py:2003] Chunked prefill is enabled with max_num_batched_tokens=16384. +INFO 06-26 23:20:29 [core.py:58] Initializing a V1 LLM engine (v0.8.5.post1) with config: model='./models/R-Phi4', speculative_config=None, tokenizer='./models/R-Phi4', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config=None, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=2048, download_dir=None, load_format=auto, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(guided_decoding_backend='auto', reasoning_backend=None), observability_config=ObservabilityConfig(show_hidden_metrics=False, otlp_traces_endpoint=None, collect_model_forward_time=False, collect_model_execute_time=False), seed=None, served_model_name=./models/R-Phi4, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=True, use_async_output_proc=True, disable_mm_preprocessor_cache=False, mm_processor_kwargs=None, pooler_config=None, compilation_config={"level":3,"custom_ops":["none"],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"max_capture_size":512} +WARNING 06-26 23:20:29 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 128 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed. 
+INFO 06-26 23:20:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0, 1], buffer_handle=(2, 10485760, 10, 'psm_91db3df4'), local_subscribe_addr='ipc:///tmp/99c135bd-1d4b-4daf-83bc-a6f70abaf6e9', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-26 23:20:29 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_ac810fef'), local_subscribe_addr='ipc:///tmp/34bc21d9-e012-4b83-81c4-4c6631a3b315', remote_subscribe_addr=None, remote_addr_ipv6=False)
+WARNING 06-26 23:20:29 [utils.py:2522] Methods determine_num_available_blocks,device_config,get_cache_block_size_bytes,initialize_cache not implemented in
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:29 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[0], buffer_handle=(1, 10485760, 10, 'psm_67acfb4e'), local_subscribe_addr='ipc:///tmp/4dfc25c2-785e-46da-aa84-5c743348f0ce', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:31 [utils.py:1055] Found nccl from library libnccl.so.2
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:31 [pynccl.py:69] vLLM is using nccl==2.21.5
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:31 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:31 [custom_all_reduce_utils.py:244] reading GPU P2P access cache from /home/jiangli/.cache/vllm/gpu_p2p_access_cache_for_2,3.json
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:31 [shm_broadcast.py:266] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_f4532ecd'), local_subscribe_addr='ipc:///tmp/bfdb7a74-4fdf-4e8a-aa92-bb3569da3f1d', remote_subscribe_addr=None, remote_addr_ipv6=False)
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:31 [parallel_state.py:1004] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:31 [parallel_state.py:1004] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:31 [cuda.py:221] Using Flash Attention backend on V1 engine.
+(VllmWorker rank=1 pid=3276091) WARNING 06-26 23:20:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=0 pid=3276090) WARNING 06-26 23:20:31 [topk_topp_sampler.py:69] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:31 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:31 [gpu_model_runner.py:1329] Starting to load model ./models/R-Phi4...
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:32 [loader.py:458] Loading weights took 1.12 seconds
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:32 [loader.py:458] Loading weights took 1.17 seconds
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:33 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.296202 seconds
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:33 [gpu_model_runner.py:1347] Model loading took 3.6254 GiB and 1.345457 seconds
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:38 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_1_0 for vLLM's torch.compile
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:38 [backends.py:430] Dynamo bytecode transform time: 5.65 s
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:38 [backends.py:420] Using cache directory: /home/jiangli/.cache/vllm/torch_compile_cache/fe665d3e67/rank_0_0 for vLLM's torch.compile
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:38 [backends.py:430] Dynamo bytecode transform time: 5.78 s
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:43 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.361 s
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:44 [backends.py:118] Directly load the compiled graph(s) for shape None from the cache, took 4.395 s
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:20:49 [monitor.py:33] torch.compile takes 5.65 s in total
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:20:49 [monitor.py:33] torch.compile takes 5.78 s in total
+INFO 06-26 23:20:50 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens
+INFO 06-26 23:20:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x
+INFO 06-26 23:20:50 [kv_cache_utils.py:634] GPU KV cache size: 973,584 tokens
+INFO 06-26 23:20:50 [kv_cache_utils.py:637] Maximum concurrency for 2,048 tokens per request: 475.38x
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:21:10 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:21:10 [custom_all_reduce.py:195] Registering 4355 cuda graph addresses
+(VllmWorker rank=1 pid=3276091) INFO 06-26 23:21:10 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB
+(VllmWorker rank=0 pid=3276090) INFO 06-26 23:21:10 [gpu_model_runner.py:1686] Graph capturing finished in 20 secs, took 2.36 GiB
+INFO 06-26 23:21:10 [core.py:159] init engine (profile, create kv cache, warmup model) took 37.27 seconds
+INFO 06-26 23:21:10 [core_client.py:439] Core engine process 0 ready.
+INFO 06-26 23:35:35 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-26 23:35:35 [__init__.py:239] Automatically detected platform cuda.
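The `all` rows in these log tables are consistent with an unweighted mean over the tasks sharing a metric, with the `Stderr` column averaged the same way. A sketch under that assumption, recomputing the `all` row of the M1-Phi4-10M table below from its per-task rows:
```
# Assumed aggregation: plain mean per metric over the evaluated tasks.
sem = {"arc_challenge": 0.7507, "arc_easy": 0.8205,
       "commonsenseqa": 0.6156, "truthfulqa": 0.4215}
math_pass = {"gsm8k": 0.7539, "math_500": 0.8250}

print(round(sum(sem.values()) / len(sem), 4))              # -> 0.6521
print(round(sum(math_pass.values()) / len(math_pass), 4))  # -> 0.7895
```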
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.6521|± |0.0267|
+| | |math_pass@1:1_samples|0.7895|± |0.0406|
+|mm\|arc_challenge\|0| 0|sem |0.7507|± |0.0222|
+|mm\|arc_easy\|0 | 0|sem |0.8205|± |0.0125|
+|mm\|commonsenseqa\|0| 0|sem |0.6156|± |0.0272|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7539|± |0.0204|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.8250|± |0.0608|
+|mm\|truthfulqa\|0 | 0|sem |0.4215|± |0.0451|
+
diff --git a/test/0-1k/logs/R-Phi4.log b/test/0-1k/logs/R-Phi4.log
new file mode 100644
index 0000000000000000000000000000000000000000..91ffa0970158aa563b9535048e9e30c76e24b47e
--- /dev/null
+++ b/test/0-1k/logs/R-Phi4.log
@@ -0,0 +1,15 @@
+INFO 06-25 00:31:45 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-25 00:31:45 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-25 00:32:59 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-25 00:32:59 [__init__.py:239] Automatically detected platform cuda.
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |sem |0.9134|± |0.0168|
+| | |math_pass@1:1_samples|0.9743|± |0.0052|
+|mm\|arc_challenge\|0| 0|sem |0.9528|± |0.0109|
+|mm\|arc_easy\|0 | 0|sem |0.9768|± |0.0049|
+|mm\|commonsenseqa\|0| 0|sem |0.8812|± |0.0181|
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.9485|± |0.0105|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|1.0000|± |0.0000|
+|mm\|truthfulqa\|0 | 0|sem |0.8430|± |0.0332|
+
diff --git a/test/0-1k/logs/R1-Llama3-8B.log b/test/0-1k/logs/R1-Llama3-8B.log
new file mode 100644
index 0000000000000000000000000000000000000000..f5f6ad774b0da7785cf539502c17fdd7227b5e95
--- /dev/null
+++ b/test/0-1k/logs/R1-Llama3-8B.log
@@ -0,0 +1,16 @@
+INFO 06-25 00:31:52 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-25 00:31:52 [__init__.py:239] Automatically detected platform cuda.
+INFO 06-25 00:34:07 [importing.py:53] Triton module has been replaced with a placeholder.
+INFO 06-25 00:34:07 [__init__.py:239] Automatically detected platform cuda.
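`show_results.log` and `show_results1.log` below look like pivots of the per-model `results_*.json` files committed later in this diff (each has a `results` dict of task → metric/stderr pairs plus an `all` entry). A rough, hypothetical reconstruction of that aggregation; the paths follow the layout added here, but the actual script is not in this diff:
```
import glob
import json
from collections import defaultdict

# Hypothetical aggregation over test/0-1k/results/<model>/results_*.json.
rows = defaultdict(dict)  # task -> {model: (metric, value, stderr)}
for path in sorted(glob.glob("test/0-1k/results/*/results_*.json")):
    model = path.split("/")[3]  # e.g. "._merged_M-Phi4-10L"
    with open(path) as f:
        results = json.load(f)["results"]
    for task, metrics in results.items():
        if task == "all":
            continue  # the aggregate row is recomputed separately
        for name, value in metrics.items():
            if name.endswith("_stderr"):
                continue
            rows[task][model] = (name, value, metrics.get(name + "_stderr", 0.0))

for task in sorted(rows):
    for model, (name, value, stderr) in sorted(rows[task].items()):
        print(f"|{task}| |{name}|{model}|{value:.4f}|± |{stderr:.4f}|")
```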
+| Task |Version| Metric |Value | |Stderr|
+|------------------|------:|---------------------|-----:|---|-----:|
+|all | |math_pass@1:1_samples|0.8371|± |0.0295|
+| | |sem |0.7179|± |0.0201|
+|mm\|arc_challenge\|0| 0|sem |0.9657|± |0.0102|
+|mm\|arc_easy\|0 | 0|sem |0.9868|± |0.0039|
+|mm\|commonsenseqa\|0| 0|sem |0.8869|± |0.0189|
+|mm\|gpqa_diamond\|0 | 2|sem |0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|0.7592|± |0.0179|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|0.9149|± |0.0411|
+|mm\|truthfulqa\|0 | 0|sem |0.7500|± |0.0475|
+
diff --git a/test/0-1k/logs/show_results.log b/test/0-1k/logs/show_results.log
new file mode 100644
index 0000000000000000000000000000000000000000..7389d80ed37a2ad5db6351cb48c8869ec1da0b0f
--- /dev/null
+++ b/test/0-1k/logs/show_results.log
@@ -0,0 +1,81 @@
+| Task |Version| Metric | Model |Value | |Stderr|
+|------------------|------:|---------------------|---------------------|-----:|---|-----:|
+|mm\|arc_challenge\|0| 0|sem |._merged_M-Phi4-10L |0.5643|± |0.0254|
+| | | |._merged_M-Phi4-10M |0.5643|± |0.0254|
+| | | |._merged_M-Phi4-5L |0.5643|± |0.0254|
+| | | |._merged_M-Phi4-5M |0.5643|± |0.0254|
+| | | |._models_R-Phi4 |0.9528|± |0.0109|
+| | | |._models_R1-Llama3-8B|0.9657|± |0.0102|
+|mm\|arc_easy\|0 | 0|sem |._merged_M-Phi4-10L |0.6030|± |0.0159|
+| | | |._merged_M-Phi4-10M |0.6030|± |0.0159|
+| | | |._merged_M-Phi4-5L |0.6061|± |0.0159|
+| | | |._merged_M-Phi4-5M |0.6030|± |0.0159|
+| | | |._models_R-Phi4 |0.9768|± |0.0049|
+| | | |._models_R1-Llama3-8B|0.9868|± |0.0039|
+|mm\|commonsenseqa\|0| 0|sem |._merged_M-Phi4-10L |0.5250|± |0.0280|
+| | | |._merged_M-Phi4-10M |0.5250|± |0.0280|
+| | | |._merged_M-Phi4-5L |0.5250|± |0.0280|
+| | | |._merged_M-Phi4-5M |0.5250|± |0.0280|
+| | | |._models_R-Phi4 |0.8812|± |0.0181|
+| | | |._models_R1-Llama3-8B|0.8869|± |0.0189|
+|mm\|gpqa_diamond\|0 | 2|sem |._models_R1-Llama3-8B|0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|._merged_M-Phi4-10L |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-10M |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-5L |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-5M |0.7539|± |0.0204|
+| | | |._models_R-Phi4 |0.9485|± |0.0105|
+| | | |._models_R1-Llama3-8B|0.7592|± |0.0179|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|._merged_M-Phi4-10L |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-10M |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-5L |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-5M |0.8250|± |0.0608|
+| | | |._models_R-Phi4 |1.0000|± |0.0000|
+| | | |._models_R1-Llama3-8B|0.9149|± |0.0411|
+|mm\|truthfulqa\|0 | 0|sem |._merged_M-Phi4-10L |0.3306|± |0.0429|
+| | | |._merged_M-Phi4-10M |0.3306|± |0.0429|
+| | | |._merged_M-Phi4-5L |0.3306|± |0.0429|
+| | | |._merged_M-Phi4-5M |0.3306|± |0.0429|
+| | | |._models_R-Phi4 |0.8430|± |0.0332|
+| | | |._models_R1-Llama3-8B|0.7500|± |0.0475|
+
+After adjusting the metric:
+| Task |Version| Metric | Model |Value | |Stderr|
+|------------------|------:|---------------------|---------------------|-----:|---|-----:|
+|mm\|arc_challenge\|0| 0|sem |._merged_M-Phi4-10L |0.7507|± |0.0222|
+| | | |._merged_M-Phi4-10M |0.7507|± |0.0222|
+| | | |._merged_M-Phi4-5L |0.7507|± |0.0222|
+| | | |._merged_M-Phi4-5M |0.7507|± |0.0222|
+| | | |._models_R-Phi4 |0.9528|± |0.0109|
+| | | |._models_R1-Llama3-8B|0.9657|± |0.0102|
+|mm\|arc_easy\|0 | 0|sem |._merged_M-Phi4-10L |0.8163|± |0.0126|
+| | | |._merged_M-Phi4-10M |0.8163|± |0.0126|
+| | | |._merged_M-Phi4-5L |0.8205|± |0.0125|
+| | | |._merged_M-Phi4-5M |0.8163|± |0.0126|
+| | | |._models_R-Phi4 |0.9768|± |0.0049|
+| | | |._models_R1-Llama3-8B|0.9868|± |0.0039|
+|mm\|commonsenseqa\|0| 0|sem |._merged_M-Phi4-10L |0.6156|± |0.0272|
+| | | |._merged_M-Phi4-10M |0.6156|± |0.0272|
+| | | |._merged_M-Phi4-5L |0.6156|± |0.0272|
+| | | |._merged_M-Phi4-5M |0.6156|± |0.0272|
+| | | |._models_R-Phi4 |0.8812|± |0.0181|
+| | | |._models_R1-Llama3-8B|0.8869|± |0.0189|
+|mm\|gpqa_diamond\|0 | 2|sem |._models_R1-Llama3-8B|0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|._merged_M-Phi4-10L |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-10M |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-5L |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-5M |0.7539|± |0.0204|
+| | | |._models_R-Phi4 |0.9485|± |0.0105|
+| | | |._models_R1-Llama3-8B|0.7592|± |0.0179|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|._merged_M-Phi4-10L |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-10M |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-5L |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-5M |0.8250|± |0.0608|
+| | | |._models_R-Phi4 |1.0000|± |0.0000|
+| | | |._models_R1-Llama3-8B|0.9149|± |0.0411|
+|mm\|truthfulqa\|0 | 0|sem |._merged_M-Phi4-10L |0.4215|± |0.0451|
+| | | |._merged_M-Phi4-10M |0.4215|± |0.0451|
+| | | |._merged_M-Phi4-5L |0.4215|± |0.0451|
+| | | |._merged_M-Phi4-5M |0.4215|± |0.0451|
+| | | |._models_R-Phi4 |0.8430|± |0.0332|
+| | | |._models_R1-Llama3-8B|0.7500|± |0.0475|
+
diff --git a/test/0-1k/logs/show_results1.log b/test/0-1k/logs/show_results1.log
new file mode 100644
index 0000000000000000000000000000000000000000..3ae834eecfdd4c1a943a0dfe6696906ec8a2190f
--- /dev/null
+++ b/test/0-1k/logs/show_results1.log
@@ -0,0 +1,58 @@
+| Task |Version| Metric | Model |Value | |Stderr|
+|------------------|------:|---------------------|---------------------|-----:|---|-----:|
+|mm\|arc_challenge\|0| 0|sem |._merged_M-Phi4-10L |0.7507|± |0.0222|
+| | | |._merged_M-Phi4-10M |0.7507|± |0.0222|
+| | | |._merged_M-Phi4-5L |0.7507|± |0.0222|
+| | | |._merged_M-Phi4-5M |0.7507|± |0.0222|
+| | | |._merged_M1-Phi4-10L |0.7507|± |0.0222|
+| | | |._merged_M1-Phi4-10M |0.7507|± |0.0222|
+| | | |._models_I-Phi4 |0.9501|± |0.0112|
+| | | |._models_R-Phi4 |0.9528|± |0.0109|
+| | | |._models_R1-Llama3-8B|0.9657|± |0.0102|
+|mm\|arc_easy\|0 | 0|sem |._merged_M-Phi4-10L |0.8163|± |0.0126|
+| | | |._merged_M-Phi4-10M |0.8163|± |0.0126|
+| | | |._merged_M-Phi4-5L |0.8205|± |0.0125|
+| | | |._merged_M-Phi4-5M |0.8163|± |0.0126|
+| | | |._merged_M1-Phi4-10L |0.8215|± |0.0124|
+| | | |._merged_M1-Phi4-10M |0.8205|± |0.0125|
+| | | |._models_I-Phi4 |0.9768|± |0.0049|
+| | | |._models_R-Phi4 |0.9768|± |0.0049|
+| | | |._models_R1-Llama3-8B|0.9868|± |0.0039|
+|mm\|commonsenseqa\|0| 0|sem |._merged_M-Phi4-10L |0.6156|± |0.0272|
+| | | |._merged_M-Phi4-10M |0.6156|± |0.0272|
+| | | |._merged_M-Phi4-5L |0.6156|± |0.0272|
+| | | |._merged_M-Phi4-5M |0.6156|± |0.0272|
+| | | |._merged_M1-Phi4-10L |0.6156|± |0.0272|
+| | | |._merged_M1-Phi4-10M |0.6156|± |0.0272|
+| | | |._models_I-Phi4 |0.8344|± |0.0208|
+| | | |._models_R-Phi4 |0.8812|± |0.0181|
+| | | |._models_R1-Llama3-8B|0.8869|± |0.0189|
+|mm\|gpqa_diamond\|0 | 2|sem |._models_R1-Llama3-8B|0.0000| | |
+|mm\|gsm8k\|0 | 0|math_pass@1:1_samples|._merged_M-Phi4-10L |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-10M |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-5L |0.7539|± |0.0204|
+| | | |._merged_M-Phi4-5M |0.7539|± |0.0204|
+| | | |._merged_M1-Phi4-10L |0.7539|± |0.0204|
+| | | |._merged_M1-Phi4-10M |0.7539|± |0.0204|
+| | | |._models_I-Phi4 |0.9351|± |0.0117|
+| | | |._models_R-Phi4 |0.9485|± |0.0105|
+| | | |._models_R1-Llama3-8B|0.7592|± |0.0179|
+|mm\|math_500\|0 | 3|math_pass@1:1_samples|._merged_M-Phi4-10L |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-10M |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-5L |0.8250|± |0.0608|
+| | | |._merged_M-Phi4-5M |0.8250|± |0.0608|
+| | | |._merged_M1-Phi4-10L |0.8250|± |0.0608|
+| | | |._merged_M1-Phi4-10M |0.8250|± |0.0608|
+| | | |._models_I-Phi4 |1.0000|± |0.0000|
+| | | |._models_R-Phi4 |1.0000|± |0.0000|
+| | | |._models_R1-Llama3-8B|0.9149|± |0.0411|
+|mm\|truthfulqa\|0 | 0|sem |._merged_M-Phi4-10L |0.4215|± |0.0451|
+| | | |._merged_M-Phi4-10M |0.4215|± |0.0451|
+| | | |._merged_M-Phi4-5L |0.4215|± |0.0451|
+| | | |._merged_M-Phi4-5M |0.4215|± |0.0451|
+| | | |._merged_M1-Phi4-10L |0.4215|± |0.0451|
+| | | |._merged_M1-Phi4-10M |0.4215|± |0.0451|
+| | | |._models_I-Phi4 |0.7686|± |0.0385|
+| | | |._models_R-Phi4 |0.8430|± |0.0332|
+| | | |._models_R1-Llama3-8B|0.7500|± |0.0475|
+
diff --git a/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..cbb2865578bb29c0024055913f5086e0b7a78311
--- /dev/null
+++ b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2a0ac3ac1b5a550478bd7e0026a6a07dc935c356238fd91f667ea72c8ce8fa7
+size 3523775
diff --git a/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..602c139c5f37077906445b7c483b0e02635b1128
--- /dev/null
+++ b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66f2e4c08b2abafc11954dd0ecdc1e407980d6f831cd94390ead549f4f606a06
+size 8171727
diff --git a/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ab151a731c00918fcf3a5b695d5aef0a8793dab7
--- /dev/null
+++ b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42e151e99b98f4c3abe9a2ac86790f19382162d79f428b5fc957061834ca7751
+size 2872250
diff --git a/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..b0f304c68949988b926206c8efb5938e8e13588f
--- /dev/null
+++ b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b007b099d1d5ca21f49cb1237f214d1777495062247c2f6f6e40baee35a6eb4
+size 3036743 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..70ab42d9bb816c50f7b54de43ed3a11f93b2d5ed --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6de8483256272873ace2754c95b1cb189df2dad999fb5b1c05b5679e3be263 +size 317248 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4cc0f1a88ea0e7ad3d2f775d31981bce7c562515 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f3c5c35896a0db6e8fac7f81849c0ddc54546524ac8467a1b895f3f2dd439d0 +size 1153406 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cbb2865578bb29c0024055913f5086e0b7a78311 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2a0ac3ac1b5a550478bd7e0026a6a07dc935c356238fd91f667ea72c8ce8fa7 +size 3523775 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..89264e01d9e020a6ded3970711bb707e0da50c03 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e5fe6812cb09dbdd15af279f1e9dbe48a78099a07f00230722b8cc26b71bd58 +size 8171457 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ab151a731c00918fcf3a5b695d5aef0a8793dab7 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42e151e99b98f4c3abe9a2ac86790f19382162d79f428b5fc957061834ca7751 +size 2872250 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet 
b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b0f304c68949988b926206c8efb5938e8e13588f --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b007b099d1d5ca21f49cb1237f214d1777495062247c2f6f6e40baee35a6eb4 +size 3036743 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..70ab42d9bb816c50f7b54de43ed3a11f93b2d5ed --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6de8483256272873ace2754c95b1cb189df2dad999fb5b1c05b5679e3be263 +size 317248 diff --git a/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4cc0f1a88ea0e7ad3d2f775d31981bce7c562515 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f3c5c35896a0db6e8fac7f81849c0ddc54546524ac8467a1b895f3f2dd439d0 +size 1153406 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cbb2865578bb29c0024055913f5086e0b7a78311 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2a0ac3ac1b5a550478bd7e0026a6a07dc935c356238fd91f667ea72c8ce8fa7 +size 3523775 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cb0257ea64f60aa8b489fa50a967a3c18d8c1a1c --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b99d2bd680c897db27c31af1bd6f5281c065f271af181c22a6f105a6c001b7ee +size 8174970 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..ab151a731c00918fcf3a5b695d5aef0a8793dab7 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42e151e99b98f4c3abe9a2ac86790f19382162d79f428b5fc957061834ca7751 +size 2872250 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b0f304c68949988b926206c8efb5938e8e13588f --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b007b099d1d5ca21f49cb1237f214d1777495062247c2f6f6e40baee35a6eb4 +size 3036743 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..70ab42d9bb816c50f7b54de43ed3a11f93b2d5ed --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6de8483256272873ace2754c95b1cb189df2dad999fb5b1c05b5679e3be263 +size 317248 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fe6286ef6721ba5b24be64249f04acc450264485 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33d0db6485fbd1eb4e702d0893aed03d327f062c26146eaffa53564195a4704b +size 1153183 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cbb2865578bb29c0024055913f5086e0b7a78311 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2a0ac3ac1b5a550478bd7e0026a6a07dc935c356238fd91f667ea72c8ce8fa7 +size 3523775 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..89264e01d9e020a6ded3970711bb707e0da50c03 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:7e5fe6812cb09dbdd15af279f1e9dbe48a78099a07f00230722b8cc26b71bd58 +size 8171457 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ab151a731c00918fcf3a5b695d5aef0a8793dab7 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42e151e99b98f4c3abe9a2ac86790f19382162d79f428b5fc957061834ca7751 +size 2872250 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b0f304c68949988b926206c8efb5938e8e13588f --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b007b099d1d5ca21f49cb1237f214d1777495062247c2f6f6e40baee35a6eb4 +size 3036743 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..70ab42d9bb816c50f7b54de43ed3a11f93b2d5ed --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6de8483256272873ace2754c95b1cb189df2dad999fb5b1c05b5679e3be263 +size 317248 diff --git a/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4cc0f1a88ea0e7ad3d2f775d31981bce7c562515 --- /dev/null +++ b/test/0-1k/outputs/._merged_M-Phi4-5M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f3c5c35896a0db6e8fac7f81849c0ddc54546524ac8467a1b895f3f2dd439d0 +size 1153406 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..eef2404a0e5bfe00e7854d74ceeb0dc1837e680d --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95bf422b6b2bde6e6642f36a90c9a6e7d96cf81b3d9095676eddec512a5b2ff1 +size 3523861 diff --git 
a/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..aca63189ba0beaa82d27ff23a9cb9a6fd5e06d1f --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fa81cc97bb8a81353a23cacefdb71e0041420f9283aa25c69f5ce7a1849ad64 +size 8177376 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5af0b74893f5ecbc4b133d12dea06a55a1a0124a --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f956f6d90c4df6dec506e53859b937019a991c9644fbcaae160994e7dbf46561 +size 2872384 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b0f304c68949988b926206c8efb5938e8e13588f --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b007b099d1d5ca21f49cb1237f214d1777495062247c2f6f6e40baee35a6eb4 +size 3036743 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..70ab42d9bb816c50f7b54de43ed3a11f93b2d5ed --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6de8483256272873ace2754c95b1cb189df2dad999fb5b1c05b5679e3be263 +size 317248 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0a7b9b3dab9143b85d955f0f77a01bfb58e4e451 --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10L/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6355d649274c2fdfba21f7e25fb8068157c9dfdd30aca337d48e66bc20c75ae +size 1153412 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet 
b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cbb2865578bb29c0024055913f5086e0b7a78311 --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2a0ac3ac1b5a550478bd7e0026a6a07dc935c356238fd91f667ea72c8ce8fa7 +size 3523775 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..30ceb0632aa3b8fd59aaeccecaf98c4bd5836c37 --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b7c57cb1da14a76f8739605e4cbbc3f997870fe877e8fa2c660099d5c8dfccd +size 8176661 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ab151a731c00918fcf3a5b695d5aef0a8793dab7 --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42e151e99b98f4c3abe9a2ac86790f19382162d79f428b5fc957061834ca7751 +size 2872250 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b0f304c68949988b926206c8efb5938e8e13588f --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b007b099d1d5ca21f49cb1237f214d1777495062247c2f6f6e40baee35a6eb4 +size 3036743 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..70ab42d9bb816c50f7b54de43ed3a11f93b2d5ed --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6de8483256272873ace2754c95b1cb189df2dad999fb5b1c05b5679e3be263 +size 317248 diff --git a/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..fe6286ef6721ba5b24be64249f04acc450264485 --- /dev/null +++ b/test/0-1k/outputs/._merged_M1-Phi4-10M/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33d0db6485fbd1eb4e702d0893aed03d327f062c26146eaffa53564195a4704b +size 1153183 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|aime24|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|aime24|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c4b37d8dd4043d013dc4d62b869c555820b7a --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|aime24|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01854230a0086cea0ce4988872c851c2d6a619849359c4f09a46855c33bec536 +size 9530 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_challenge|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_challenge|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..86f4d13a3a58dfab480592c96a4dbb29c8b7d932 --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_challenge|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e0f5f26ea36c5a785461c30288481d60e702feae0605cae5ac1f8a253b43d99 +size 1707856 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_easy|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_easy|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..08463d6244d5d79165310335db6fe8ff5a88147a --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|arc_easy|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53407f34d2f089beafc32f481284dd66d39f96a5b652372c1301f9fb21f409d8 +size 2772026 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|commonsenseqa|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|commonsenseqa|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c18b612f7e94f7444b0080bcc5c34c061171ee3b --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|commonsenseqa|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce074beb3f6abba88224b660130c142e988d4ba595533b2df7a1128715696300 +size 874994 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gpqa_diamond|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gpqa_diamond|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c4b37d8dd4043d013dc4d62b869c555820b7a --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gpqa_diamond|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:01854230a0086cea0ce4988872c851c2d6a619849359c4f09a46855c33bec536 +size 9530 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gsm8k|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gsm8k|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..691aa6f7cdda5cd9935385ac64413c7f15fb6cd1 --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|gsm8k|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f06aeb4e6597b73d1d1e4df8c19f5768f7434517bd0e61feed92f35585262f6 +size 1686697 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|math_500|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|math_500|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..548b3c95638405d0f4f1bd2e7c55316f3ceef66f --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|math_500|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36e4b9deb5a81fcab51db1026c29d3a05f56b851e61942f90e272fe821ffd7c2 +size 185594 diff --git a/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|truthfulqa|0_2025-06-26T21-33-22.888531.parquet b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|truthfulqa|0_2025-06-26T21-33-22.888531.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5c2bbffcb6211a507eb4a492eb4579cd93287c88 --- /dev/null +++ b/test/0-1k/outputs/._models_I-Phi4/2025-06-26T21-33-22.888531/outputs_mm|truthfulqa|0_2025-06-26T21-33-22.888531.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a479adea684d21998d0a53c3400b1382cd2c2415e25b8c16941e9326f79d6f5 +size 372581 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|aime24|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|aime24|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c4b37d8dd4043d013dc4d62b869c555820b7a --- /dev/null +++ b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|aime24|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01854230a0086cea0ce4988872c851c2d6a619849359c4f09a46855c33bec536 +size 9530 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c38e22343560f703b7025e5e63a68936b5324120 --- /dev/null +++ b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_challenge|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd9b4554d1bd70f6531cd8ba3a1e1dff1270557019da5922419b2d70a1f74d79 +size 3724073 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet 
b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0bcf16ccdc6fca25ec8153053b767ef0bde65d6d --- /dev/null +++ b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|arc_easy|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecf8f8de123276bf0ae1e69ff16e8b90d594d426ec9c6831d355221a090edfb7 +size 8687940 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bd03fb2d274ae04473eb937b29fd5fedcc8ca510 --- /dev/null +++ b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|commonsenseqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cee5662461ea9c40c33968fdee4c45f4430b87696e550d8da93335e1ee1e032 +size 2996499 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gpqa_diamond|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gpqa_diamond|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c4b37d8dd4043d013dc4d62b869c555820b7a --- /dev/null +++ b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gpqa_diamond|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01854230a0086cea0ce4988872c851c2d6a619849359c4f09a46855c33bec536 +size 9530 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..289e92d01f898ad813195dced2333623d220dca2 --- /dev/null +++ b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|gsm8k|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc0e275c274f6dd2b309a6ba58c06ff2a5ddcd03f71637f440443e70ee24df8d +size 3110921 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..dea317e67825935fddca9d2637d0bf0d6031ebf4 --- /dev/null +++ b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|math_500|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e36f8ebf7e53034b723f8efa7c342f5fcd423ea253bbfcb9be57f4f24c014f3e +size 340648 diff --git a/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e5a83a2f9c706633c0e90a5b0ca99fabb57cc649 --- /dev/null +++ 
b/test/0-1k/outputs/._models_R-Phi4/2025-06-23T01-52-10.258150/outputs_mm|truthfulqa|0_2025-06-23T01-52-10.258150.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5752663d88589620238868f55bc0005b7a51e8c363a0aa52b5f9a2d02ceaec2 +size 1189441 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|aime24|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|aime24|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..970c4b37d8dd4043d013dc4d62b869c555820b7a --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|aime24|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01854230a0086cea0ce4988872c851c2d6a619849359c4f09a46855c33bec536 +size 9530 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..76cd0bc595a60dfb30632a7ed644eac25d4245a6 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|arc_challenge|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f639d35015ee5d5727c4cd848ec0b29a874581d6cd1d0337fd56f8cf0532c221 +size 3875143 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bba9715ad94a30aa4d61407906236b9fb0238f8b --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|arc_easy|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1d970b013dc212f5fafd1788a581c2b995fbb7ac2b21bc62164fba206003fe0 +size 9718496 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b5400bd8cfd975a7e60edb28fe213d6e570a321b --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|commonsenseqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d5a50b6e7296a4a85a911b7fd8c852b7fff85d181a418f614d1e1c473b127d8 +size 3576697 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5145857fd05ecab6e695e0bd952eae0be7a8ace3 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|gpqa_diamond|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 
+oid sha256:7b32edd725efd5df77a7e02cb32f4a1be8c2de96f421434ca7bcf2d22a8812fe +size 49817 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e32a21268dc7c17810116a2616921871bd47945 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|gsm8k|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:700b9b4ef458a6125dde969d4b74247e04f56087abe991947442c604ed8ac5c7 +size 4434376 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ec72cf5835f5d4bf69f09327a059e8d77378071 --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|math_500|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9ae5e4bd5ad92b52fd3e4add8636f2c32be2a8ffdb938ec8223b226d7861714 +size 596543 diff --git a/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet new file mode 100644 index 0000000000000000000000000000000000000000..88ec4325b337e24eaf4b3cdf4a91199b5a96b51e --- /dev/null +++ b/test/0-1k/outputs/._models_R1-Llama3-8B/2025-06-23T10-15-33.465228/outputs_mm|truthfulqa|0_2025-06-23T10-15-33.465228.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5ae612c19bc90af918a5e6fd98bce4dee3df26af11075ee5d123ba104e98591 +size 1148116 diff --git a/test/0-1k/results/._merged_M-Phi4-10L/results_2025-06-23T01-52-10.258150.json b/test/0-1k/results/._merged_M-Phi4-10L/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..1f29b9e9e3bf0a20eab730b675eeea5ca89b72ba --- /dev/null +++ b/test/0-1k/results/._merged_M-Phi4-10L/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.615625, + "sem_stderr": 0.027235813331371504 + }, + "mm|truthfulqa|0": { + "sem": 0.4214876033057851, + "sem_stderr": 0.04507732278775094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444759 + }, + "mm|arc_challenge|0": { + "sem": 0.7506561679790026, + "sem_stderr": 0.022193614862582374 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7539149888143176, + "math_pass@1:1_samples_stderr": 0.020395593169900115 + }, + "mm|arc_easy|0": { + "sem": 0.8162618796198522, + "sem_stderr": 0.012591249331358639 + }, + "all": { + "sem": 0.6510076627261601, + "sem_stderr": 0.02677450007826586, + "math_pass@1:1_samples": 0.7894574944071588, + "math_pass@1:1_samples_stderr": 0.040619512007173855 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + 
"mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._merged_M-Phi4-10M/results_2025-06-23T01-52-10.258150.json b/test/0-1k/results/._merged_M-Phi4-10M/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..c7d96a0f89b812f101e4a6116dd36c8ac81b81d2 --- /dev/null +++ b/test/0-1k/results/._merged_M-Phi4-10M/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.615625, + "sem_stderr": 0.0272358133313715 + }, + "mm|truthfulqa|0": { + "sem": 0.4214876033057851, + "sem_stderr": 0.04507732278775094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444759 + }, + "mm|arc_challenge|0": { + "sem": 0.7506561679790026, + "sem_stderr": 0.02219361486258237 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7539149888143176, + "math_pass@1:1_samples_stderr": 0.0203955931699001 + }, + "mm|arc_easy|0": { + "sem": 0.8162618796198522, + "sem_stderr": 0.012591249331358637 + }, + "all": { + "sem": 0.6510076627261601, + "sem_stderr": 0.02677450007826586, + "math_pass@1:1_samples": 0.7894574944071588, + "math_pass@1:1_samples_stderr": 0.04061951200717385 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._merged_M-Phi4-5L/results_2025-06-23T01-52-10.258150.json b/test/0-1k/results/._merged_M-Phi4-5L/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..c9f78a6b681c37e5f1090175ee99ede6c0181b51 --- /dev/null +++ b/test/0-1k/results/._merged_M-Phi4-5L/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.615625, + "sem_stderr": 0.027235813331371504 + }, + "mm|truthfulqa|0": { + "sem": 0.4214876033057851, + "sem_stderr": 0.04507732278775094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444759 + }, + "mm|arc_challenge|0": { + "sem": 0.7506561679790026, + "sem_stderr": 0.02219361486258237 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7539149888143176, + "math_pass@1:1_samples_stderr": 0.020395593169900098 + }, + "mm|arc_easy|0": { + "sem": 0.8204857444561774, + "sem_stderr": 0.012477840298724637 + }, + "all": { + "sem": 0.6520636289352413, + "sem_stderr": 0.02674614782010736, + "math_pass@1:1_samples": 0.7894574944071588, + "math_pass@1:1_samples_stderr": 0.04061951200717384 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._merged_M-Phi4-5M/results_2025-06-23T01-52-10.258150.json 
b/test/0-1k/results/._merged_M-Phi4-5M/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..7f9ade84741a645a89c96ccf563fded20029654d --- /dev/null +++ b/test/0-1k/results/._merged_M-Phi4-5M/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.615625, + "sem_stderr": 0.027235813331371504 + }, + "mm|truthfulqa|0": { + "sem": 0.4214876033057851, + "sem_stderr": 0.04507732278775094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444759 + }, + "mm|arc_challenge|0": { + "sem": 0.7506561679790026, + "sem_stderr": 0.02219361486258237 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7539149888143176, + "math_pass@1:1_samples_stderr": 0.020395593169900108 + }, + "mm|arc_easy|0": { + "sem": 0.8162618796198522, + "sem_stderr": 0.012591249331358637 + }, + "all": { + "sem": 0.6510076627261601, + "sem_stderr": 0.02677450007826586, + "math_pass@1:1_samples": 0.7894574944071588, + "math_pass@1:1_samples_stderr": 0.04061951200717385 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._merged_M1-Phi4-10L/results_2025-06-23T01-52-10.258150.json b/test/0-1k/results/._merged_M1-Phi4-10L/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..de75fb6e35f8d615c9624ea4a958ffa8a2c3bde1 --- /dev/null +++ b/test/0-1k/results/._merged_M1-Phi4-10L/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.615625, + "sem_stderr": 0.027235813331371504 + }, + "mm|truthfulqa|0": { + "sem": 0.4214876033057851, + "sem_stderr": 0.04507732278775094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444759 + }, + "mm|arc_challenge|0": { + "sem": 0.7506561679790026, + "sem_stderr": 0.02219361486258237 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7539149888143176, + "math_pass@1:1_samples_stderr": 0.020395593169900087 + }, + "mm|arc_easy|0": { + "sem": 0.8215417106652587, + "sem_stderr": 0.012449089912246145 + }, + "all": { + "sem": 0.6523276204875117, + "sem_stderr": 0.026738960223487738, + "math_pass@1:1_samples": 0.7894574944071588, + "math_pass@1:1_samples_stderr": 0.04061951200717384 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._merged_M1-Phi4-10M/results_2025-06-23T01-52-10.258150.json b/test/0-1k/results/._merged_M1-Phi4-10M/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..c5a2667a6a485dcce1dabafca7fa92f275bad40d --- /dev/null +++ 
b/test/0-1k/results/._merged_M1-Phi4-10M/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.615625, + "sem_stderr": 0.027235813331371504 + }, + "mm|truthfulqa|0": { + "sem": 0.4214876033057851, + "sem_stderr": 0.04507732278775094 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.825, + "math_pass@1:1_samples_stderr": 0.06084343084444759 + }, + "mm|arc_challenge|0": { + "sem": 0.7506561679790026, + "sem_stderr": 0.02219361486258237 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7539149888143176, + "math_pass@1:1_samples_stderr": 0.020395593169900094 + }, + "mm|arc_easy|0": { + "sem": 0.8204857444561774, + "sem_stderr": 0.012477840298724637 + }, + "all": { + "sem": 0.6520636289352413, + "sem_stderr": 0.02674614782010736, + "math_pass@1:1_samples": 0.7894574944071588, + "math_pass@1:1_samples_stderr": 0.04061951200717384 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json b/test/0-1k/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json new file mode 100644 index 0000000000000000000000000000000000000000..12c6ee8ce1ce5fd2328bd671618ce6afc8a48e1f --- /dev/null +++ b/test/0-1k/results/._models_I-Phi4/results_2025-06-26T21-33-22.888531.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|math_500|0": { + "math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.834375, + "sem_stderr": 0.020813649923046133 + }, + "mm|arc_easy|0": { + "sem": 0.9767687434002112, + "sem_stderr": 0.0048976377483802 + }, + "mm|truthfulqa|0": { + "sem": 0.768595041322314, + "sem_stderr": 0.03849856098794091 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9351230425055929, + "math_pass@1:1_samples_stderr": 0.011663051339533824 + }, + "mm|arc_challenge|0": { + "sem": 0.9501312335958005, + "sem_stderr": 0.011166429074111667 + }, + "all": { + "math_pass@1:1_samples": 0.9675615212527964, + "math_pass@1:1_samples_stderr": 0.005831525669766912, + "sem": 0.8824675045795813, + "sem_stderr": 0.018844069433369727 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|math_500|0": 40, + "mm|commonsenseqa|0": 320, + "mm|arc_easy|0": 947, + "mm|truthfulqa|0": 121, + "mm|gsm8k|0": 447, + "mm|arc_challenge|0": 381 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json b/test/0-1k/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json new file mode 100644 index 0000000000000000000000000000000000000000..2e3694c943e1ba2789daa020fb05032bf580b749 --- /dev/null +++ b/test/0-1k/results/._models_R-Phi4/results_2025-06-23T01-52-10.258150.json @@ -0,0 +1,53 @@ +{ + "results": { + "mm|commonsenseqa|0": { + "sem": 0.88125, + "sem_stderr": 0.018112192805211768 + }, + "mm|truthfulqa|0": { + "sem": 0.8429752066115702, + "sem_stderr": 0.03321244842547129 + }, + "mm|math_500|0": { + 
"math_pass@1:1_samples": 1.0, + "math_pass@1:1_samples_stderr": 0.0 + }, + "mm|arc_challenge|0": { + "sem": 0.952755905511811, + "sem_stderr": 0.010883605491044059 + }, + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.9485458612975392, + "math_pass@1:1_samples_stderr": 0.010460968487095353 + }, + "mm|arc_easy|0": { + "sem": 0.9767687434002112, + "sem_stderr": 0.00489763774838021 + }, + "all": { + "sem": 0.9134374638808982, + "sem_stderr": 0.016776471117526833, + "math_pass@1:1_samples": 0.9742729306487696, + "math_pass@1:1_samples_stderr": 0.0052304842435476765 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|commonsenseqa|0": 320, + "mm|truthfulqa|0": 121, + "mm|math_500|0": 40, + "mm|arc_challenge|0": 381, + "mm|gsm8k|0": 447, + "mm|arc_easy|0": 947 + } +} \ No newline at end of file diff --git a/test/0-1k/results/._models_R1-Llama3-8B/results_2025-06-23T10-15-33.465228.json b/test/0-1k/results/._models_R1-Llama3-8B/results_2025-06-23T10-15-33.465228.json new file mode 100644 index 0000000000000000000000000000000000000000..d2f41244f84849b559f79fe726be99e4c748260f --- /dev/null +++ b/test/0-1k/results/._models_R1-Llama3-8B/results_2025-06-23T10-15-33.465228.json @@ -0,0 +1,57 @@ +{ + "results": { + "mm|gsm8k|0": { + "math_pass@1:1_samples": 0.7592267135325131, + "math_pass@1:1_samples_stderr": 0.017939722576203666 + }, + "mm|arc_easy|0": { + "sem": 0.9868263473053892, + "sem_stderr": 0.003948120939888521 + }, + "mm|math_500|0": { + "math_pass@1:1_samples": 0.9148936170212766, + "math_pass@1:1_samples_stderr": 0.041142194186747624 + }, + "mm|arc_challenge|0": { + "sem": 0.9657320872274143, + "sem_stderr": 0.01016944303520295 + }, + "mm|truthfulqa|0": { + "sem": 0.75, + "sem_stderr": 0.04752931878933585 + }, + "mm|gpqa_diamond|0": { + "sem": 0.0 + }, + "mm|commonsenseqa|0": { + "sem": 0.8869257950530035, + "sem_stderr": 0.018858227047766424 + }, + "all": { + "math_pass@1:1_samples": 0.8370601652768949, + "math_pass@1:1_samples_stderr": 0.029540958381475645, + "sem": 0.7178968459171614, + "sem_stderr": 0.020126277453048437 + } + }, + "versions": { + "mm|aime24|0": 3, + "mm|arc_challenge|0": 0, + "mm|arc_easy|0": 0, + "mm|commonsenseqa|0": 0, + "mm|gpqa_diamond|0": 2, + "mm|gsm8k|0": 0, + "mm|math_500|0": 3, + "mm|mmlu_pro|0": 0, + "mm|truthfulqa|0": 0 + }, + "size": { + "mm|gsm8k|0": 569, + "mm|arc_easy|0": 835, + "mm|math_500|0": 47, + "mm|arc_challenge|0": 321, + "mm|truthfulqa|0": 84, + "mm|gpqa_diamond|0": 1, + "mm|commonsenseqa|0": 283 + } +} \ No newline at end of file