import os

# Expose a single GPU to the process; this must be set before torch
# initializes CUDA, hence before the torch import below.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config
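

# A fresh model is loaded for every kernel configuration: both use_kernels
# and the attention implementation are fixed at load time.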
def load_model(use_kernels, use_attn_kernels, model_id):
    # Dequantize the MXFP4 checkpoint so the same weights run in every
    # configuration, with or without the optimized kernels.
    quantization_config = Mxfp4Config(dequantize=True)
    kwargs = {
        "dtype": "auto",
        "device_map": "cuda:0",
        "use_kernels": use_kernels,
        "quantization_config": quantization_config,
    }
    if use_attn_kernels:
        kwargs["attn_implementation"] = "kernels-community/vllm-flash-attn3"

    return AutoModelForCausalLM.from_pretrained(model_id, **kwargs).eval()
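

# The timed region: greedy decoding with early stopping disabled
# (eos_token_id=-1 matches no real token), so every call generates exactly
# max_new_tokens tokens and runs stay directly comparable.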
def generate(model, model_inputs, max_new_tokens):
    with torch.inference_mode():
        model.generate(
            **model_inputs,
            do_sample=False,
            temperature=None,
            max_new_tokens=max_new_tokens,
            eos_token_id=-1,
            disable_compile=True,  # stay eager; no compile warm-up in timings
        )
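

# Benchmark driver: all four kernel on/off combinations, three batch sizes.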
if __name__ == "__main__":
    model_id = "openai/gpt-oss-20b"
    max_new_tokens = 256
    batch_sizes = [32, 64, 128]

    # Cycled to fill each batch with user prompts.
    base_prompts = [
        "What is Tensor Parallelism?",
        "Explain machine learning fundamentals.",
        "How do neural networks work?",
        "What are the benefits of distributed computing?",
        "Describe the attention mechanism in transformers.",
        "What is gradient descent?",
        "How does backpropagation work?",
        "Explain the concept of overfitting.",
    ]
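
    # Tokenize once per batch size so tokenization stays outside the timed
    # region. Decoder-only models need left padding for batched generation.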
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    pre_tokenized = {}
    for batch_size in batch_sizes:
        messages = [
            [{"role": "user", "content": base_prompts[i % len(base_prompts)]}]
            for i in range(batch_size)
        ]
        # reasoning_effort is forwarded to the gpt-oss chat template.
        texts = [
            tokenizer.apply_chat_template(
                m, add_generation_prompt=True, tokenize=False, reasoning_effort="low"
            )
            for m in messages
        ]
        pre_tokenized[batch_size] = tokenizer(
            texts,
            return_tensors="pt",
            padding=True,
            padding_side="left",
        )
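
    # Each Timer runs generate() five times and records the mean; all
    # measurements feed one comparison table at the end.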
    results = []

    for use_attn_kernels in [True, False]:
        for use_kernels in [True, False]:
            model = load_model(use_kernels, use_attn_kernels, model_id)

            for batch_size in batch_sizes:
                results.append(
                    benchmark.Timer(
                        stmt="generate(model, model_inputs, max_new_tokens)",
                        setup="from __main__ import generate",
                        globals={
                            "model": model,
                            "model_inputs": pre_tokenized[batch_size].to(model.device),
                            "max_new_tokens": max_new_tokens,
                        },
                        num_threads=torch.get_num_threads(),
                        label=f"Time to generate {max_new_tokens} tokens",
                        sub_label=f"batch_size={batch_size}",
                        description=f"kernels={use_kernels}, attn_kernels={use_attn_kernels}",
                    ).timeit(5)
                )

            # Release GPU memory before loading the next configuration.
            model.to("cpu")
            del model
            torch.cuda.empty_cache()
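
    # Print a formatted comparison across all configurations.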
    compare = benchmark.Compare(results)
    compare.print()