diff --git "a/profile.log" "b/profile.log" --- "a/profile.log" +++ "b/profile.log" @@ -4,504 +4,414 @@ Try the 'nsys status --environment' command to learn more. WARNING: CPU context switch tracing not supported, disabling. Try the 'nsys status --environment' command to learn more. -INFO 08-10 00:35:37 [__init__.py:244] Automatically detected platform cuda. +INFO 08-10 18:10:22 [__init__.py:244] Automatically detected platform cuda. INFO:__main__:FastTTS AIME Experiment INFO:__main__:================================================== INFO:__main__:Starting FastTTS AIME experiment -INFO:__main__:Parameters: {'num_iterations': 10, 'n': 512, 'temperature': 0.8, 'beam_width': 4, 'generator_model': 'Qwen/Qwen2.5-Math-1.5B-Instruct', 'verifier_model': 'peiyi9979/math-shepherd-mistral-7b-prm', 'generator_gpu_memory': 0.2, 'verifier_gpu_memory': 0.65, 'offload_enabled': False, 'spec_beam_extension': False, 'prefix_aware_scheduling': False} +INFO:__main__:Parameters: {'num_iterations': 10, 'n': 128, 'temperature': 2, 'beam_width': 4, 'generator_model': 'Qwen/Qwen2.5-Math-1.5B-Instruct', 'verifier_model': 'peiyi9979/math-shepherd-mistral-7b-prm', 'generator_gpu_memory': 0.28, 'verifier_gpu_memory': 0.62, 'offload_enabled': False, 'spec_beam_extension': False, 'prefix_aware_scheduling': False} INFO:__main__:Loaded AIME dataset with 30 samples INFO:__main__:Problem: Every morning Aya goes for a $9$-kilometer-long walk and stops at a coffee shop afterwards. When she walks at a constant speed of $s$ kilometers per hour, the walk takes her 4 hours, including $t$ minutes spent in the coffee shop. When she walks $s+2$ kilometers per hour, the walk takes her 2 hours and 24 minutes, including $t$ minutes spent in the coffee shop. Suppose Aya walks at $s+\frac{1}{2}$ kilometers per hour. Find the number of minutes the walk takes her, including the $t$ minutes spent in the coffee shop. INFO:__main__:Reference answer: 204 INFO:__main__:Initializing FastTTS models... INFO:fasttts:Initializing FastTTS models... INFO:models.vllm_wrapper:Initializing generator model: Qwen/Qwen2.5-Math-1.5B-Instruct -INFO 08-10 00:35:51 [__init__.py:244] Automatically detected platform cuda. +INFO 08-10 18:10:34 [__init__.py:244] Automatically detected platform cuda. INFO:models.tts_llm:Using V0 engine with speculative beam extension: False INFO:models.tts_llm:Prefix-aware scheduling enabled: False -✅ Process PID: 3428396 | CUDA Context Object: None -INFO 08-10 00:36:03 [config.py:841] This model supports multiple tasks: {'classify', 'generate', 'reward', 'embed'}. Defaulting to 'generate'. -INFO 08-10 00:36:03 [config.py:1472] Using max model len 4096 +✅ Process PID: 3674655 | CUDA Context Object: None +INFO 08-10 18:10:44 [config.py:841] This model supports multiple tasks: {'classify', 'embed', 'reward', 'generate'}. Defaulting to 'generate'. 
+INFO 08-10 18:10:44 [config.py:1472] Using max model len 4096
 INFO:models.generator_engine:Using GeneratorLLMEngine with vLLM version 0.9.2
-INFO 08-10 00:36:04 [llm_engine.py:230] Initializing a V0 LLM engine (v0.9.2) with config: model='Qwen/Qwen2.5-Math-1.5B-Instruct', speculative_config=None, tokenizer='Qwen/Qwen2.5-Math-1.5B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='xgrammar', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=42, served_model_name=Qwen/Qwen2.5-Math-1.5B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=False, use_async_output_proc=True, pooler_config=None, compilation_config={"level":0,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":[],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":false,"cudagraph_num_of_warmups":0,"cudagraph_capture_sizes":[256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":256,"local_cache_dir":null}, use_cached_outputs=False,
-INFO 08-10 00:36:05 [cuda.py:363] Using Flash Attention backend.
-INFO 08-10 00:36:06 [parallel_state.py:1076] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
-INFO 08-10 00:36:06 [model_runner.py:1171] Starting to load model Qwen/Qwen2.5-Math-1.5B-Instruct...
-INFO 08-10 00:36:07 [weight_utils.py:292] Using model weights format ['*.safetensors']
-INFO 08-10 00:36:07 [weight_utils.py:345] No model.safetensors.index.json found in remote.
+INFO 08-10 18:10:45 [llm_engine.py:230] Initializing a V0 LLM engine (v0.9.2) with config: model='Qwen/Qwen2.5-Math-1.5B-Instruct', speculative_config=None, tokenizer='Qwen/Qwen2.5-Math-1.5B-Instruct', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='xgrammar', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=42, served_model_name=Qwen/Qwen2.5-Math-1.5B-Instruct, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=True, chunked_prefill_enabled=False, use_async_output_proc=True, pooler_config=None, compilation_config={"level":0,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":[],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":false,"cudagraph_num_of_warmups":0,"cudagraph_capture_sizes":[256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":256,"local_cache_dir":null}, use_cached_outputs=False,
+INFO 08-10 18:10:46 [cuda.py:363] Using Flash Attention backend.
+INFO 08-10 18:10:47 [parallel_state.py:1076] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
+INFO 08-10 18:10:47 [model_runner.py:1171] Starting to load model Qwen/Qwen2.5-Math-1.5B-Instruct...
+INFO 08-10 18:10:47 [weight_utils.py:292] Using model weights format ['*.safetensors']
+INFO 08-10 18:10:48 [weight_utils.py:345] No model.safetensors.index.json found in remote.
Loading safetensors checkpoint shards:   0% Completed | 0/1 [00:00, disable_hybrid_kv_cache_manager=False)
 INFO:models.vllm_wrapper:Generator model initialized successfully in separate process
 INFO:models.vllm_wrapper:Initializing verifier model: peiyi9979/math-shepherd-mistral-7b-prm
-INFO 08-10 00:36:33 [__init__.py:244] Automatically detected platform cuda.
+INFO 08-10 18:11:12 [__init__.py:244] Automatically detected platform cuda.
 INFO:models.tts_llm:Prefix-aware scheduling enabled: False
-✅ Process PID: 3428950 | CUDA Context Object: None
-INFO 08-10 00:36:46 [config.py:1472] Using max model len 4096
-INFO 08-10 00:36:46 [arg_utils.py:1596] (Disabling) chunked prefill by default
-INFO 08-10 00:36:46 [config.py:4601] Only "last" pooling supports chunked prefill and prefix caching; disabling both.
+✅ Process PID: 3675033 | CUDA Context Object: None
+INFO 08-10 18:11:23 [config.py:1472] Using max model len 4096
+INFO 08-10 18:11:23 [arg_utils.py:1596] (Disabling) chunked prefill by default
+INFO 08-10 18:11:23 [config.py:4601] Only "last" pooling supports chunked prefill and prefix caching; disabling both.
 You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`.
 This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file you can ignore this message
 You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`.
 This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file you can ignore this message.
-INFO 08-10 00:36:48 [core.py:526] Waiting for init message from front-end.
-INFO 08-10 00:36:48 [core.py:69] Initializing a V1 LLM engine (v0.9.2) with config: model='peiyi9979/math-shepherd-mistral-7b-prm', speculative_config=None, tokenizer='peiyi9979/math-shepherd-mistral-7b-prm', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='xgrammar', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=42, served_model_name=peiyi9979/math-shepherd-mistral-7b-prm, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, pooler_config=PoolerConfig(pooling_type='STEP', normalize=None, softmax=True, step_tag_id=12902, returned_token_ids=[648, 387]), compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
-INFO 08-10 00:36:49 [parallel_state.py:1076] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
-WARNING 08-10 00:36:49 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
-INFO 08-10 00:36:49 [gpu_model_runner.py:1770] Starting to load model peiyi9979/math-shepherd-mistral-7b-prm...
-INFO 08-10 00:36:49 [gpu_model_runner.py:1775] Loading model from scratch...
-INFO 08-10 00:36:49 [cuda.py:284] Using Flash Attention backend on V1 engine.
-INFO 08-10 00:36:50 [weight_utils.py:292] Using model weights format ['*.bin']
+INFO 08-10 18:11:25 [core.py:526] Waiting for init message from front-end.
+INFO 08-10 18:11:25 [core.py:69] Initializing a V1 LLM engine (v0.9.2) with config: model='peiyi9979/math-shepherd-mistral-7b-prm', speculative_config=None, tokenizer='peiyi9979/math-shepherd-mistral-7b-prm', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=False, dtype=torch.bfloat16, max_seq_len=4096, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=1, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=False, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='xgrammar', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=42, served_model_name=peiyi9979/math-shepherd-mistral-7b-prm, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=False, chunked_prefill_enabled=False, use_async_output_proc=False, pooler_config=PoolerConfig(pooling_type='STEP', normalize=None, softmax=True, step_tag_id=12902, returned_token_ids=[648, 387]), compilation_config={"level":3,"debug_dump_path":"","cache_dir":"","backend":"","custom_ops":[],"splitting_ops":["vllm.unified_attention","vllm.unified_attention_with_output"],"use_inductor":true,"compile_sizes":[],"inductor_compile_config":{"enable_auto_functionalized_v2":false},"inductor_passes":{},"use_cudagraph":true,"cudagraph_num_of_warmups":1,"cudagraph_capture_sizes":[512,504,496,488,480,472,464,456,448,440,432,424,416,408,400,392,384,376,368,360,352,344,336,328,320,312,304,296,288,280,272,264,256,248,240,232,224,216,208,200,192,184,176,168,160,152,144,136,128,120,112,104,96,88,80,72,64,56,48,40,32,24,16,8,4,2,1],"cudagraph_copy_inputs":false,"full_cuda_graph":false,"max_capture_size":512,"local_cache_dir":null}
+INFO 08-10 18:11:25 [parallel_state.py:1076] rank 0 in world size 1 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0
+WARNING 08-10 18:11:26 [topk_topp_sampler.py:59] FlashInfer is not available. Falling back to the PyTorch-native implementation of top-p & top-k sampling. For the best performance, please install FlashInfer.
+INFO 08-10 18:11:26 [gpu_model_runner.py:1770] Starting to load model peiyi9979/math-shepherd-mistral-7b-prm...
+INFO 08-10 18:11:26 [gpu_model_runner.py:1775] Loading model from scratch...
+INFO 08-10 18:11:26 [cuda.py:284] Using Flash Attention backend on V1 engine.
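Editor's note: the pooler_config above encodes the Math-Shepherd scoring convention: each reasoning step's score is read at the 'ки' step-tag token (id 12902) as a softmax over the logits of '+' and '-' (ids 648 and 387). A sketch of the same computation with plain transformers, following the model card rather than FastTTS's verifier code:

    # Sketch based on the peiyi9979/math-shepherd-mistral-7b-prm model card.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("peiyi9979/math-shepherd-mistral-7b-prm")
    model = AutoModelForCausalLM.from_pretrained(
        "peiyi9979/math-shepherd-mistral-7b-prm", torch_dtype=torch.bfloat16
    )

    candidate_tokens = [648, 387]  # '+' (good step) and '-' (bad step)
    step_tag_id = 12902            # 'ки', appended after every step

    text = "question text Step 1: first step ки"  # hypothetical input
    input_ids = tok(text, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(input_ids).logits[:, :, candidate_tokens]
        probs = logits.softmax(dim=-1)[:, :, 0]        # P('+') at every position
        step_scores = probs[input_ids == step_tag_id]  # one score per 'ки' tag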
+INFO 08-10 18:11:27 [weight_utils.py:292] Using model weights format ['*.bin']
Loading pt checkpoint shards:   0% Completed | 0/2 [00:00
+    main()
+    ^^^^^^
+  File "/home/cy/hmarkc/FastTTS/run_aime_fasttts.py", line 222, in main
+    results = run_aime_fasttts(args)
+              ^^^^^^^^^^^^^^^^^^^^^^
+  File "/home/cy/hmarkc/FastTTS/run_aime_fasttts.py", line 173, in run_aime_fasttts
+    results = fasttts.search([problem], search_config=search_config)
+              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/home/cy/hmarkc/FastTTS/fasttts.py", line 107, in search
+    return self._process_batch(problems, search_config)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/home/cy/hmarkc/FastTTS/fasttts.py", line 75, in _process_batch
+    return beam_search(examples, search_config, self.generator, self.verifier)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/home/cy/hmarkc/FastTTS/search/beam_search.py", line 501, in beam_search
+    completed_beams, total_generator_latency_s, total_verifier_latency_s, n_generator_latency_s, n_verifier_latency_s, total_num_tokens, n_completion_tokens, extended_tokens_list = _beam_search(problems, search_config, generator, verifier)
+                                                                                                                                                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/home/cy/hmarkc/FastTTS/search/beam_search.py", line 349, in _beam_search
+    gen_results, gen_time = generate_beam(
+                            ^^^^^^^^^^^^^^
+  File "/home/cy/hmarkc/FastTTS/search/beam_search.py", line 121, in generate_beam
+    llm_outputs = generator.generate(gen_prompts, sampling_params=current_sampling_params, priority=prefix_priorities)
+                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/home/cy/hmarkc/FastTTS/models/vllm_wrapper.py", line 289, in generate
+    raise RuntimeError(f"Failed to generate: {result['error']}")
+RuntimeError: Failed to generate: The decoder prompt (length 4121) is longer than the maximum model length of 4096. Make sure that `max_model_len` is no smaller than the number of text tokens.
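Editor's note: the failure above is a length overflow: after enough beam-extension rounds, one prompt reached 4,121 tokens against max_model_len=4096. A minimal guard, assuming access to the generator's tokenizer before the generate call in beam_search.py (the helper below is hypothetical, not FastTTS code):

    # Hypothetical pre-check; the truncation policy is a design choice.
    def clamp_prompt(prompt, tokenizer, max_model_len=4096, reserve=256):
        """Keep a prompt under max_model_len, leaving room for new tokens."""
        ids = tokenizer.encode(prompt)
        budget = max_model_len - reserve
        if len(ids) > budget:
            # Truncate from the left so the most recent steps survive.
            ids = ids[-budget:]
            prompt = tokenizer.decode(ids)
        return prompt

An alternative is simply to retire beams whose prompt exceeds the budget and let the remaining beams continue.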
GPU 3: General Metrics for NVIDIA AD10x (any frequency)
-Generating '/tmp/nsys-report-31ca.qdstrm'
- [1/8] [========================100%] vllm_tts.nsys-rep
+Generating '/tmp/nsys-report-d5d5.qdstrm'
+ [1/8] [========================100%] vllm_tts.nsys-rep
 [2/8] [========================100%] vllm_tts.sqlite
 [3/8] Executing 'nvtx_sum' stats report

 Time (%)  Total Time (ns)  Instances  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Style  Range
 --------  ---------------  ---------  -----------------  -----------------  ---------------  ---------------  ----------------  -------  ----------------------------------
-     50.4  615,508,900,650  1  615,508,900,650.0  615,508,900,650.0  615,508,900,650  615,508,900,650  0.0  PushPop  :Total
-     30.9  377,775,113,492  10  37,777,511,349.2  33,926,317,513.0  17,409,073,850  85,275,584,293  19,187,080,091.7  PushPop  :encode
-     18.7  228,062,869,822  10  22,806,286,982.2  13,053,895,310.5  5,568,975,355  124,783,252,393  36,158,420,234.8  PushPop  :generate
-      0.0  73,018  1  73,018.0  73,018.0  73,018  73,018  0.0  PushPop  CCCL:cub::DeviceSegmentedRadixSort
+     50.4  393,853,695,354  1  393,853,695,354.0  393,853,695,354.0  393,853,695,354  393,853,695,354  0.0  PushPop  :Total
+     35.8  279,622,144,381  7  39,946,020,625.9  45,828,385,121.0  11,745,200,369  53,173,117,619  15,447,291,549.1  PushPop  :encode
+     13.7  107,321,220,075  8  13,415,152,509.4  13,699,450,799.5  8,606,313,131  16,197,341,448  2,353,657,800.2  PushPop  :generate
+      0.0  50,608  1  50,608.0  50,608.0  50,608  50,608  0.0  PushPop  CCCL:cub::DeviceSegmentedRadixSort
 [4/8] Executing 'osrt_sum' stats report

- Time (%)  Total Time (ns)  Num Calls  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Name
- --------  -----------------  ---------  -----------------  -----------------  ---------------  ---------------  ---------------  ----------------------
-     32.6  7,792,704,953,495  44,636  174,583,407.0  100,066,997.0  1,047  480,000,113,712  5,080,249,500.0  pthread_cond_timedwait
-     26.1  6,236,860,651,313  364,768  17,098,157.3  10,061,638.0  1,003  623,604,969,227  1,481,816,448.1  epoll_wait
-     14.2  3,389,193,863,707  351  9,655,822,973.5  10,000,074,891.0  37,447  10,000,146,504  1,763,391,564.1  sem_timedwait
-      9.1  2,168,140,683,077  68,005  31,882,077.5  3,994.0  1,000  604,316,543,844  2,587,754,992.7  read
-      8.1  1,931,485,101,475  110  17,558,955,468.0  15,562,101,892.0  19,296  75,268,771,931  8,474,671,246.1  pthread_cond_wait
-      5.9  1,418,605,571,886  14,454  98,146,227.5  100,117,929.0  1,000  127,360,173,650  1,133,374,130.1  poll
-      2.5  600,000,078,949  1  600,000,078,949.0  600,000,078,949.0  600,000,078,949  600,000,078,949  0.0  clock_nanosleep
-      1.6  376,631,000,921  1,073  351,007,456.6  368,488,167.0  22,208  395,262,785  65,512,278.4  sem_wait
-      0.0  3,208,341,718  7,171  447,405.1  2,744.0  1,002  116,845,014  6,559,246.0  ioctl
-      0.0  1,297,424,271  385  3,369,933.2  1,060.0  1,000  1,231,617,618  62,805,835.6  waitpid
-      0.0  837,925,322  328  2,554,650.4  1,541,758.0  2,653  15,931,868  2,875,058.5  pthread_rwlock_wrlock
-      0.0  508,903,377  148,813  3,419.8  1,845.0  1,033  167,484,803  434,171.9  munmap
-      0.0  449,132,516  523  858,762.0  2,953.0  1,203  47,760,328  4,273,843.6  fopen
-      0.0  202,771,605  40  5,069,290.1  5,066,371.5  5,054,027  5,090,400  11,668.3  nanosleep
-      0.0  176,814,999  1,772  99,782.7  14,269.0  1,018  49,482,707  1,190,532.9  recv
-      0.0  175,220,501  46,566  3,762.8  3,321.0  1,015  2,981,489  13,911.5  open64
-      0.0  141,296,518  19,633  7,196.9  3,231.0  1,005  2,322,257  39,875.0  write
-      0.0  125,409,842  150  836,065.6  4,011.5  1,026  19,058,009  3,775,637.1  open
-      0.0  107,441,657  11  9,767,423.4  32,475.0  8,648  56,014,467  21,676,550.6  connect
-      0.0  83,376,665  245  340,312.9  68,441.0  50,264  11,993,587  1,637,870.8  sleep
-      0.0  58,696,310  5,768  10,176.2  4,536.0  1,175  101,415  14,696.4  send
-      0.0  55,541,353  374  148,506.3  5,370.5  1,858  18,101,407  1,591,906.1  fopen64
-      0.0  46,212,897  3  15,404,299.0  972,056.0  771,119  44,469,722  25,171,595.2  fork
-      0.0  38,871,052  11,682  3,327.4  1,674.0  1,000  1,322,703  13,708.2  mmap64
-      0.0  34,825,505  98  355,362.3  11,006.0  3,637  7,252,382  1,114,373.7  pthread_join
-      0.0  24,237,733  43  563,668.2  679,094.0  9,033  2,621,075  418,691.6  pthread_rwlock_rdlock
-      0.0  16,263,294  215  75,643.2  56,672.0  18,297  647,325  76,047.9  pthread_create
-      0.0  10,978,097  1,515  7,246.3  2,033.0  1,001  476,763  16,174.2  fgets
-      0.0  8,320,904  6,075  1,369.7  1,217.0  1,000  11,344  523.6  epoll_ctl
-      0.0  3,438,124  2,712  1,267.7  1,067.0  1,002  50,355  1,454.8  fclose
-      0.0  3,149,163  114  27,624.2  18,079.0  1,062  267,708  31,501.2  pthread_mutex_lock
-      0.0  2,175,752  147  14,801.0  2,941.0  1,327  335,767  44,213.9  futex
-      0.0  1,637,994  476  3,441.2  2,441.0  1,001  22,365  2,700.5  pthread_cond_signal
-      0.0  1,480,633  191  7,752.0  4,026.0  1,260  63,501  6,748.6  mmap
-      0.0  605,007  102  5,931.4  4,486.5  2,278  20,052  3,478.2  pipe2
-      0.0  266,247  43  6,191.8  5,159.0  1,855  16,318  4,047.8  socket
-      0.0  241,136  24  10,047.3  2,996.0  1,018  85,071  19,780.6  bind
-      0.0  214,226  36  5,950.7  4,659.5  1,004  16,206  4,811.5  pthread_cond_broadcast
-      0.0  110,036  44  2,500.8  1,703.5  1,001  31,431  4,559.6  sigaction
-      0.0  97,290  15  6,486.0  7,289.0  2,686  9,080  2,394.0  pthread_mutex_trylock
-      0.0  83,392  30  2,779.7  2,103.5  1,335  7,663  1,432.3  stat
-      0.0  71,941  46  1,563.9  1,256.5  1,007  5,880  920.3  fcntl
-      0.0  70,652  5  14,130.4  13,607.0  9,078  20,410  4,960.5  accept4
-      0.0  66,944  33  2,028.6  2,274.0  1,000  3,302  705.3  dup2
-      0.0  48,979  11  4,452.6  5,158.0  1,031  5,915  1,753.0  fflush
-      0.0  47,122  8  5,890.3  5,826.5  4,578  7,397  822.6  lstat
-      0.0  45,382  20  2,269.1  1,715.5  1,009  7,031  1,369.5  pread
-      0.0  35,370  2  17,685.0  17,685.0  15,562  19,808  3,002.4  socketpair
-      0.0  31,356  7  4,479.4  4,377.0  3,771  4,898  424.4  fputs_unlocked
-      0.0  28,150  8  3,518.8  3,306.0  1,758  5,649  1,275.5  flock
-      0.0  26,595  5  5,319.0  4,243.0  3,129  9,506  2,659.0  fread
-      0.0  19,856  8  2,482.0  2,412.0  1,990  3,094  423.6  mprotect
-      0.0  16,788  10  1,678.8  1,404.0  1,233  2,826  591.7  listen
-      0.0  16,340  3  5,446.7  5,726.0  2,205  8,409  3,111.4  fwrite
-      0.0  15,148  6  2,524.7  2,259.0  1,648  3,736  861.9  fstat
-      0.0  7,638  2  3,819.0  3,819.0  3,757  3,881  87.7  fputs
-      0.0  5,504  1  5,504.0  5,504.0  5,504  5,504  0.0  kill
-      0.0  5,423  3  1,807.7  1,999.0  1,423  2,001  333.1  openat64
+ Time (%)  Total Time (ns)  Num Calls  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Name
+ --------  -----------------  ---------  ----------------  ----------------  ---------  ---------------  ---------------  ----------------------
+     29.0  4,107,957,333,358  238,517  17,222,912.1  10,061,224.0  1,000  391,167,152,042  872,427,793.8  epoll_wait
+     25.0  3,547,920,544,846  29,625  119,761,031.0  100,063,686.0  1,012  1,000,142,487  122,948,044.4  pthread_cond_timedwait
+     15.6  2,207,991,379,444  233  9,476,357,851.7  10,000,075,945.0  17,770  10,000,137,880  2,178,640,805.2  sem_timedwait
+     11.6  1,644,165,870,679  108  15,223,758,061.8  13,321,191,303.0  9,317  50,085,818,176  7,164,644,071.4  pthread_cond_wait
+     10.2  1,447,295,467,592  44,957  32,192,883.6  3,377.0  1,000  373,076,199,364  2,027,875,088.0  read
+      6.6  942,233,911,978  10,134  92,977,492.8  100,114,125.0  1,000  17,768,547,837  478,817,554.3  poll
+      2.0  279,272,511,042  749  372,860,495.4  413,610,654.0  18,760  451,398,921  85,966,967.2  sem_wait
+      0.0  2,652,802,802  5,038  526,558.7  3,349.5  1,002  116,838,296  6,549,869.1  ioctl
+      0.0  1,126,710,062  682  1,652,067.5  1,040.0  1,000  1,070,734,083  41,026,003.0  waitpid
+      0.0  386,640,419  148,864  2,597.3  1,427.0  1,015  99,489,865  257,890.3  munmap
+      0.0  307,226,614  522  588,556.7  2,081.0  1,037  20,005,618  3,204,254.5  fopen
+      0.0  202,534,023  40  5,063,350.6  5,063,053.0  5,054,017  5,073,909  5,967.5  nanosleep
+      0.0  154,008,949  46,557  3,308.0  2,602.0  1,000  15,029,894  69,665.7  open64
+      0.0  131,449,169  150  876,327.8  3,599.0  1,056  22,776,981  3,972,654.1  open
+      0.0  94,621,674  88  1,075,246.3  695,819.0  3,494  5,783,134  1,252,692.1  pthread_rwlock_wrlock
+      0.0  88,975,648  5,494  16,195.1  4,706.0  1,000  3,228,393  103,824.5  write
+      0.0  78,944,702  1,215  64,975.1  12,599.0  1,176  2,800,486  108,421.8  recv
+      0.0  67,032,431  3  22,344,143.7  1,039,256.0  858,936  65,134,239  37,057,419.3  fork
+      0.0  56,511,786  374  151,101.0  4,617.0  1,920  19,021,663  1,628,648.9  fopen64
+      0.0  51,350,583  10  5,135,058.3  28,593.5  9,434  51,031,956  16,126,540.3  connect
+      0.0  43,716,326  100  437,163.3  16,647.5  5,708  6,100,438  1,174,093.4  pthread_join
+      0.0  33,555,772  1,935  17,341.5  7,007.0  1,344  117,177  21,053.3  send
+      0.0  29,627,225  7,800  3,798.4  2,307.5  1,000  1,214,464  15,284.6  mmap64
+      0.0  28,227,618  245  115,214.8  68,764.0  41,609  11,870,902  754,141.7  sleep
+      0.0  11,071,018  167  66,293.5  47,982.0  16,115  788,254  74,165.5  pthread_create
+      0.0  8,394,435  1,289  6,512.4  2,201.0  1,000  89,202  8,381.1  fgets
+      0.0  2,708,647  2,234  1,212.5  1,073.0  1,000  9,581  530.5  fclose
+      0.0  2,649,927  1,556  1,703.0  1,418.0  1,000  18,772  876.0  epoll_ctl
+      0.0  1,827,104  37  49,381.2  21,969.0  1,258  601,537  99,552.8  pthread_mutex_lock
+      0.0  1,525,531  202  7,552.1  3,538.0  1,229  126,121  10,757.8  mmap
+      0.0  1,442,219  12  120,184.9  135,708.5  14,897  260,157  84,664.1  pthread_rwlock_rdlock
+      0.0  1,333,421  99  13,468.9  3,214.0  1,903  308,857  40,342.7  futex
+      0.0  1,003,222  308  3,257.2  2,488.0  1,000  15,414  2,304.6  pthread_cond_signal
+      0.0  560,112  102  5,491.3  4,417.0  1,963  16,270  3,102.1  pipe2
+      0.0  274,277  42  6,530.4  4,651.5  1,583  19,600  5,032.2  socket
+      0.0  248,295  18  13,794.2  3,429.0  1,029  91,048  23,483.4  bind
+      0.0  195,940  26  7,536.2  6,595.5  1,012  23,328  6,365.3  pthread_cond_broadcast
+      0.0  79,849  30  2,661.6  2,095.0  1,378  7,850  1,486.4  stat
+      0.0  73,833  15  4,922.2  4,711.0  2,539  7,902  1,359.0  pthread_mutex_trylock
+      0.0  69,957  5  13,991.4  13,146.0  6,404  25,009  7,666.4  accept4
+      0.0  68,843  41  1,679.1  1,763.0  1,003  2,540  377.6  sigaction
+      0.0  58,806  30  1,960.2  2,100.5  1,003  3,452  650.6  dup2
+      0.0  57,466  19  3,024.5  1,752.0  1,039  5,810  1,941.1  fflush
+      0.0  55,461  34  1,631.2  1,186.0  1,015  6,102  1,037.6  fcntl
+      0.0  45,396  8  5,674.5  5,648.0  4,969  6,340  494.9  lstat
+      0.0  41,433  19  2,180.7  1,753.0  1,013  3,492  895.5  pread
+      0.0  30,122  7  4,303.1  4,200.0  3,657  5,216  488.9  fputs_unlocked
+      0.0  28,059  2  14,029.5  14,029.5  12,230  15,829  2,544.9  socketpair
+      0.0  24,818  8  3,102.3  2,990.5  2,170  4,144  716.0  flock
+      0.0  22,207  5  4,441.4  3,489.0  3,095  6,196  1,544.4  fread
+      0.0  18,935  8  2,366.9  2,367.5  1,963  3,009  327.7  mprotect
+      0.0  16,694  3  5,564.7  3,980.0  2,934  9,780  3,687.9  fwrite
+      0.0  16,115  6  2,685.8  2,184.0  1,685  4,768  1,172.4  fstat
+      0.0  12,684  8  1,585.5  1,347.0  1,026  2,482  576.4  listen
+      0.0  10,302  1  10,302.0  10,302.0  10,302  10,302  0.0  kill
+      0.0  8,365  2  4,182.5  4,182.5  3,963  4,402  310.4  fputs
+      0.0  6,903  3  2,301.0  1,391.0  1,246  4,266  1,703.3  openat64
 [5/8] Executing 'cuda_api_sum' stats report

 Time (%)  Total Time (ns)  Num Calls  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Name
 --------  ---------------  ---------  ------------  -----------  ---------  -----------  ------------  ------------------------------------------
-     86.3  86,504,173,850  525,959  164,469.4  4,932.0  3,013  118,362,039  841,946.9  cudaMemcpyAsync
-      4.8  4,807,859,028  565,877  8,496.3  6,339.0  801  85,928,321  178,706.3  cudaLaunchKernel
-      3.2  3,195,643,536  22,017  145,144.4  155,719.0  60,556  2,565,631  59,652.2  cudaGraphLaunch_v10000
-      3.0  3,002,757,415  88  34,122,243.4  24,301.5  4,400  119,950,616  50,550,161.1  cudaHostAlloc
-      0.7  684,631,677  101,961  6,714.6  6,134.0  832  6,657,856  43,692.4  cuLaunchKernel
-      0.6  552,319,226  368,993  1,496.8  1,161.0  621  5,126,338  25,148.5  cudaEventRecord
-      0.4  411,773,507  368,986  1,116.0  803.0  349  4,151,782  6,898.3  cudaEventQuery
-      0.4  403,258,245  65,874  6,121.7  5,901.0  157  7,959,262  59,811.9  cudaMemsetAsync
-      0.2  168,870,006  10  16,887,000.6  42,516.0  13,234  168,508,195  53,274,263.5  cudaMemGetInfo
-      0.1  84,464,220  24,640  3,427.9  3,071.0  2,372  60,514  2,218.5  cudaStreamSynchronize
-      0.1  83,141,088  101,961  815.4  680.0  296  4,545,371  15,111.9  cuKernelGetFunction
-      0.1  79,210,477  35  2,263,156.5  2,110,712.0  1,574,695  3,369,912  593,837.4  cudaGraphInstantiateWithFlags_v11040
-      0.1  55,155,410  68,355  806.9  930.0  298  32,757  387.6  cudaStreamIsCapturing_v10000
-      0.0  40,033,891  68  588,733.7  260,971.0  67,629  2,721,780  622,836.4  cudaFree
-      0.0  34,965,116  10  3,496,511.6  3,681,205.5  87,524  6,083,451  1,971,794.0  cuLibraryLoadData
-      0.0  32,204,439  35  920,126.8  894,613.0  641,194  1,179,057  156,073.7  cudaGraphExecDestroy_v10000
-      0.0  25,211,287  180  140,062.7  130,824.0  8,406  487,143  54,303.3  cudaMalloc
-      0.0  5,548,549  35  158,530.0  151,726.0  129,405  200,847  19,482.2  cudaGraphDestroy_v10000
-      0.0  5,311,020  8,785  604.6  607.0  310  7,440  180.5  cudaStreamGetCaptureInfo_v2_v11030
-      0.0  4,382,614  35  125,217.5  115,772.0  101,937  322,608  36,817.5  cudaStreamEndCapture_v10000
-      0.0  3,564,640  128  27,848.8  3,239.5  2,163  1,168,183  141,059.0  cudaStreamCreateWithPriority
-      0.0  2,094,035  106  19,755.0  20,374.5  3,197  119,061  16,571.0  cudaDeviceSynchronize
-      0.0  878,560  35  25,101.7  25,425.0  18,338  32,172  3,249.4  cudaGraphGetNodes_v10000
-      0.0  464,499  35  13,271.4  11,152.0  8,596  24,369  4,692.4  cudaStreamBeginCapture_v10000
-      0.0  187,813  810  231.9  207.0  118  2,536  133.8  cuGetProcAddress_v2
-      0.0  49,658  26  1,909.9  599.0  459  20,314  3,920.0  cudaEventCreateWithFlags
-      0.0  29,469  17  1,733.5  1,041.0  645  6,099  1,619.2  cuLibraryGetKernel
-      0.0  7,478  3  2,492.7  2,363.0  2,089  3,026  481.8  cuInit
-      0.0  5,741  8  717.6  610.5  468  1,501  329.3  cudaThreadExchangeStreamCaptureMode_v10010
-      0.0  4,086  1  4,086.0  4,086.0  4,086  4,086  0.0  cudaStreamWaitEvent
-      0.0  1,950  3  650.0  204.0  139  1,607  829.4  cuModuleGetLoadingMode
-      0.0  1,702  2  851.0  851.0  370  1,332  680.2  cudaGetDriverEntryPoint_v11030
-      0.0  1,665  1  1,665.0  1,665.0  1,665  1,665  0.0  cudaEventDestroy
+     88.4  46,937,426,386  245,324  191,328.3  4,587.0  2,824  101,810,784  1,048,325.3  cudaMemcpyAsync
+      4.7  2,493,394,033  88  28,334,023.1  15,760.0  3,229  119,698,802  42,440,625.4  cudaHostAlloc
+      2.7  1,455,648,101  159,808  9,108.7  5,617.0  714  64,133,488  269,769.6  cudaLaunchKernel
+      2.3  1,207,731,744  10,618  113,743.8  73,351.0  61,988  1,706,174  82,957.5  cudaGraphLaunch_v10000
+      0.4  215,017,756  170,401  1,261.8  1,050.0  594  1,859,958  7,635.1  cudaEventRecord
+      0.3  181,609,586  170,394  1,065.8  769.0  354  4,098,109  9,965.6  cudaEventQuery
+      0.2  106,372,464  10  10,637,246.4  52,428.0  9,038  105,923,811  33,480,318.7  cudaMemGetInfo
+      0.2  99,317,189  17,033  5,830.9  6,201.0  650  3,803,345  29,193.0  cuLaunchKernel
+      0.2  80,614,125  14,374  5,608.3  5,614.0  223  361,174  3,812.7  cudaMemsetAsync
+      0.1  78,821,783  35  2,252,050.9  2,023,368.0  1,515,806  4,062,118  688,963.2  cudaGraphInstantiateWithFlags_v11040
+      0.1  40,646,025  35  1,161,315.0  1,113,301.0  799,815  1,434,417  184,273.6  cudaGraphExecDestroy_v10000
+      0.1  35,877,763  11,215  3,199.1  2,700.0  2,257  60,765  3,068.8  cudaStreamSynchronize
+      0.1  35,289,694  66  534,692.3  255,452.0  70,038  2,137,121  536,637.0  cudaFree
+      0.1  27,182,114  10  2,718,211.4  2,817,660.5  65,879  4,870,611  1,541,459.1  cuLibraryLoadData
+      0.0  24,600,138  32,130  765.6  901.0  272  9,288  365.0  cudaStreamIsCapturing_v10000
+      0.0  21,862,560  178  122,823.4  107,050.0  4,423  440,515  60,531.1  cudaMalloc
+      0.0  19,789,871  17,033  1,161.9  862.0  255  4,619,413  37,260.6  cuKernelGetFunction
+      0.0  5,538,225  35  158,235.0  154,516.0  129,373  219,967  20,588.0  cudaGraphDestroy_v10000
+      0.0  5,134,115  8,785  584.4  547.0  331  2,421  170.1  cudaStreamGetCaptureInfo_v2_v11030
+      0.0  4,473,254  35  127,807.3  122,083.0  102,731  341,773  39,401.4  cudaStreamEndCapture_v10000
+      0.0  3,442,690  128  26,896.0  2,629.0  2,164  1,153,601  139,233.8  cudaStreamCreateWithPriority
+      0.0  2,151,872  106  20,300.7  20,077.0  3,017  116,731  17,791.3  cudaDeviceSynchronize
+      0.0  964,089  35  27,545.4  28,536.0  12,939  33,952  4,699.2  cudaGraphGetNodes_v10000
+      0.0  484,704  35  13,848.7  10,086.0  8,293  49,353  7,892.5  cudaStreamBeginCapture_v10000
+      0.0  143,228  810  176.8  142.0  78  1,674  124.6  cuGetProcAddress_v2
+      0.0  41,506  26  1,596.4  405.0  293  20,771  4,030.5  cudaEventCreateWithFlags
+      0.0  20,185  16  1,261.6  755.0  388  4,976  1,169.2  cuLibraryGetKernel
+      0.0  5,221  8  652.6  588.5  403  1,269  271.3  cudaThreadExchangeStreamCaptureMode_v10010
+      0.0  4,220  3  1,406.7  1,266.0  1,194  1,760  308.1  cuInit
+      0.0  3,891  1  3,891.0  3,891.0  3,891  3,891  0.0  cudaStreamWaitEvent
+      0.0  1,600  1  1,600.0  1,600.0  1,600  1,600  0.0  cudaEventDestroy
+      0.0  1,415  3  471.7  181.0  112  1,122  564.3  cuModuleGetLoadingMode
+      0.0  1,042  2  521.0  521.0  266  776  360.6  cudaGetDriverEntryPoint_v11030
 [6/8] Executing 'cuda_gpu_kern_sum' stats report

 Time (%)  Total Time (ns)  Instances  Avg (ns)  Med (ns)  Min (ns)  Max (ns)  StdDev (ns)  Name
 --------  ---------------  ---------  -----------  -----------  ---------  ---------  -----------  ----------------------------------------------------------------------------------------------------
-     43.6  12,019,661,871  78,111  153,879.2  73,793.0  7,329  570,342  195,883.0  void cutlass::Kernel2(T1::Param…
-      9.1  2,510,719,028  11,637  215,753.1  60,833.0  9,344  538,116  230,676.9  void cutlass::Kernel2(T1::Par…
-      6.3  1,726,194,506  11,760  146,785.2  149,762.0  40,032  712,772  132,371.8  ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
-      4.1  1,138,817,632  4,284  265,830.4  224,418.0  32,865  809,640  207,976.5  ampere_bf16_s1688gemm_bf16_64x128_sliced1x2_ldg8_f2f_tn
-      4.0  1,092,889,165  23,063  47,387.1  30,592.0  29,537  639,239  81,136.2  void at::native::::cunn_SoftMaxForward<(int)4, float, float, float, at::native:::…
-      3.9  1,068,749,647  23,062  46,342.5  25,664.0  25,088  611,302  82,496.6  void at::native::::cunn_SoftMaxForward<(int)4, float, float, float, at::native:::…
-      3.8  1,037,297,919  28,980  35,793.6  25,185.0  13,920  676,807  43,597.4  void flash::flash_fwd_splitkv_kernel(T1::Par…
-      1.5  417,428,003  31,220  13,370.5  5,920.0  2,912  248,705  31,481.8  void vllm::act_and_mul_kernel, (bool)1>(T1 *, cons…
-      1.5  406,995,002  23,062  17,647.9  11,680.0  3,360  225,635  29,709.0  void at::native::unrolled_elementwise_kernel::distribution_elementwise_grid_stride_kernel::type internal::gemvx::kernel(int)0&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kern…
-      0.6  167,031,005  4,452  37,518.2  37,377.0  36,001  46,496  961.1  void cutlass::Kernel2(T1::Para…
-      0.4  119,965,264  9,352  12,827.8  12,704.0  11,616  15,744  814.6  ampere_bf16_s16816gemm_bf16_64x64_ldg8_relu_f2f_stages_64x5_tn
-      0.3  91,154,352  1,428  63,833.6  63,713.0  61,792  84,448  1,714.2  void cutlass::Kernel2(T1::Para…
-      0.3  89,448,039  31,220  2,865.1  2,111.0  1,535  39,360  2,963.8  void vllm::rotary_embedding_kernel(const long *, T1 *, T1 *, const T1 *, in…
-      0.3  69,256,245  448  154,589.8  153,137.5  148,961  169,633  5,063.2  ampere_bf16_s1688gemm_bf16_128x128_ldg8_relu_f2f_stages_32x1_tn
-      0.2  58,454,205  2,576  22,691.8  32,800.0  9,120  43,584  11,804.4  void cutlass::Kernel2(T1::Par…
-      0.2  53,386,501  28,756  1,856.5  1,696.0  1,183  3,072  495.7  void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
-      0.2  52,270,119  31,192  1,675.8  1,408.0  960  13,504  1,374.5  void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0…
-      0.2  49,631,934  2,268  21,883.6  21,632.0  21,120  24,288  522.1  ampere_bf16_s16816gemm_bf16_128x64_ldg8_relu_f2f_stages_64x3_tn
-      0.2  49,434,603  18,010  2,744.8  2,560.0  1,888  32,512  1,048.9  void at::native::::indexSelectLargeIndex, std:…
-      0.1  36,104,757  1,680  21,490.9  21,473.0  20,992  22,048  190.4  ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
-      0.1  29,013,907  22,017  1,317.8  1,312.0  1,120  1,953  89.9  void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast::indexSelectSmallIndex::type internal::gemvx::kernel(T1 *, const T1 *, const T1 *, float, int, int)
-      0.0  2,888,043  56  51,572.2  51,424.5  49,856  53,728  687.5  void flash::flash_fwd_kernel(T1::Para…
-      0.0  1,253,872  364  3,444.7  3,424.0  3,103  4,064  211.3  void flash::flash_fwd_splitkv_combine_kernel::masked_fill_kernel(at…
-      0.0  358,786  1  358,786.0  358,786.0  358,786  358,786  0.0  void at::native::tensor_kernel_scan_innermost_dim>(T1 *, const T1 *, unsign…
-      0.0  353,446  280  1,262.3  1,217.0  1,183  1,377  59.5  void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, float, __nv_bfloat16, float, (bool)0, __n…
-      0.0  318,625  1  318,625.0  318,625.0  318,625  318,625  0.0  at::native::::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider, std::array::CatArrayBatchedCopy_aligned16_contig::OpaqueType<…
-      0.0  2,336  2  1,168.0  1,168.0  1,088  1,248  113.1  void ::elementwise_kernel_with_index(T1::Par…
+     21.3  2,162,330,143  7,723  279,985.8  137,569.0  7,808  572,614  241,368.9  void cutlass::Kernel2(T1::Param…
+      6.6  675,302,986  1,542  437,939.7  488,389.0  6,976  489,318  144,690.4  std::enable_if::type internal::gemvx::kernel::cunn_SoftMaxForward<(int)4, float, float, float, at::native:::…
+      4.5  456,691,515  1,624  281,214.0  123,697.5  41,857  714,468  267,999.6  ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_f2f_tn
+      4.4  443,295,899  700  633,279.9  82,161.0  50,336  1,413,417  610,178.0  ampere_bf16_s1688gemm_bf16_128x128_ldg8_f2f_stages_32x1_tn
+      4.3  439,409,336  10,651  41,255.2  30,272.0  29,472  624,867  30,275.9  void at::native::::cunn_SoftMaxForward<(int)4, float, float, float, at::native:::…
+      3.4  347,517,575  840  413,711.4  402,820.5  131,585  994,316  214,545.4  void flash::flash_fwd_splitkv_kernel(T1::Par…
+      2.8  287,234,571  10,650  26,970.4  5,600.0  1,440  496,643  50,376.8  void at::native::vectorized_elementwise_kernel<(int)4, at::native::BinaryFunctor, (bool)1>(T1 *, cons…
+      1.6  158,104,871  10,650  14,845.5  3,872.0  2,303  177,889  19,981.1  void at::native::::distribution_elementwise_grid_stride_kernel(int)0&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kern…
+      0.2  21,779,316  6,382  3,412.6  2,880.0  1,695  6,432  1,249.9  void at::native::::indexSelectSmallIndex(const long *, T1 *, T1 *, const T1 *, in…
+      0.2  17,761,784  10,651  1,667.6  1,632.0  1,248  2,721  172.3  void at::native::unrolled_elementwise_kernel(T1::Para…
+      0.2  15,315,251  140  109,394.7  137,313.0  26,753  145,378  43,660.0  ampere_bf16_s1688gemm_bf16_128x64_sliced1x2_ldg8_relu_f2f_tn
+      0.1  13,952,650  10,618  1,314.1  1,280.0  1,120  1,728  92.3  void at::native::elementwise_kernel<(int)128, (int)2, void at::native::gpu_kernel_impl_nocast::indexSelectLargeIndex, std:…
+      0.1  8,103,531  840  9,647.1  8,160.0  6,305  15,520  2,996.4  void flash::flash_fwd_splitkv_kernel(T1::Para…
+      0.1  6,334,831  3,080  2,056.8  1,888.0  1,343  3,103  552.3  void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
+      0.0  4,928,376  4  1,232,094.0  1,222,582.0  1,208,582  1,274,630  31,240.7  void at_cuda_detail::cub::DeviceSegmentedRadixSortKernel(T1::Par…
+      0.0  2,897,013  56  51,732.4  51,664.0  49,761  54,177  837.6  void flash::flash_fwd_kernel::type internal::gemvx::kernel(T1 *, const T1 *, const T1 *, float, int, int)
+      0.0  883,038  280  3,153.7  3,168.0  2,784  3,584  212.3  void flash::flash_fwd_splitkv_combine_kernel::masked_fill_kernel(at…
+      0.0  607,545  336  1,808.2  1,792.0  1,600  2,112  115.6  void cublasLt::splitKreduce_kernel<(int)32, (int)16, int, __nv_bfloat16, __nv_bfloat16, float, (boo…
+      0.0  603,943  28  21,569.4  21,568.0  21,440  21,664  45.8  ampere_bf16_s16816gemm_bf16_128x64_ldg8_f2f_stages_32x6_tn
+      0.0  362,114  1  362,114.0  362,114.0  362,114  362,114  0.0  void at::native::tensor_kernel_scan_innermost_dim>(T1 *, const T1 *, unsign…
+      0.0  317,665  1  317,665.0  317,665.0  317,665  317,665  0.0  at::native::::fill_reverse_indices_kernel(long *, int, at::cuda::detail::IntDivider, std::array::CatArrayBatchedCopy_aligned16_contig::OpaqueType<…
+      0.0  2,559  2  1,279.5  1,279.5  1,056  1,503  316.1  void ::elementwise_kernel_with_index, st…
-      0.0  1,344  1  1,344.0  1,344.0  1,344  1,344  0.0  void at::native::vectorized_elementwise_kernel<(int)8, at::native::CUDAFunctorOnOther_add, st…
-      0.0  1,184  1  1,184.0  1,184.0  1,184  1,184  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::reciprocal_kernel_cuda(at::Tenso…
-      0.0  1,024  1  1,024.0  1,024.0  1,024  1,024  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::AUnaryFunctor, std::array, std::array<…
-      0.0  896  1  896.0  896.0  896  896  0.0  void at::native::vectorized_elementwise_kernel<(int)4, at::native::FillFunctor, std::array
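Editor's note: the ':encode' and ':generate' rows in the nvtx_sum report above come from NVTX push/pop ranges around the verifier and generator phases. A sketch of how such ranges are usually emitted from Python, not necessarily FastTTS's exact instrumentation:

    import torch

    def generate_with_range(generator, prompts, params):
        # Shows up as a ':generate' PushPop range in 'nsys stats'.
        torch.cuda.nvtx.range_push("generate")
        try:
            return generator.generate(prompts, sampling_params=params)
        finally:
            torch.cuda.nvtx.range_pop()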