# Pin the benchmark to a single GPU; this must run before CUDA is initialized.
import os; os.environ["CUDA_VISIBLE_DEVICES"] = "3"

import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config
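

# Mxfp4Config(dequantize=True) dequantizes the MXFP4 checkpoint at load time, so
# both configurations run on the same higher-precision weights; use_kernels
# toggles the optimized kernels that Transformers fetches from the Hugging Face Hub.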
def load_model(use_kernels, model_id):
    quantization_config = Mxfp4Config(dequantize=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype="auto",
        device_map="cuda:0",
        use_kernels=use_kernels,
        quantization_config=quantization_config,
    ).eval()
    return model
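

# Greedy decoding for a fixed token budget: eos_token_id=-1 matches no real
# token, so generation never stops early; temperature=None silences the
# sampling-parameter warning under do_sample=False; disable_compile=True keeps
# torch.compile warm-up out of the timings.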
def generate(model, model_inputs, max_new_tokens):
    with torch.inference_mode():
        model.generate(
            **model_inputs,
            do_sample=False,
            temperature=None,
            max_new_tokens=max_new_tokens,
            eos_token_id=-1,
            disable_compile=True,
        )
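

# Benchmark harness: time generation for both code paths (Hub kernels on / off)
# across several batch sizes and compare the results.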
if __name__ == "__main__":
    model_id = "openai/gpt-oss-20b"
    results = []
    max_new_tokens = 256
    base_prompts = [
        "What is Tensor Parallelism?",
        "Explain machine learning fundamentals.",
        "How do neural networks work?",
        "What are the benefits of distributed computing?",
        "Describe the attention mechanism in transformers.",
        "What is gradient descent?",
        "How does backpropagation work?",
        "Explain the concept of overfitting.",
    ]
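
    # The tokenizer does not depend on the kernel configuration; load it once.
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Sweep both code paths (Hub kernels on / off) across several batch sizes.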
    for use_kernels in [True, False]:
        model = load_model(use_kernels, model_id)
        for batch_size in [32, 64, 128]:
            messages = [
                [{"role": "system", "content": base_prompts[i % len(base_prompts)]}]
                for i in range(batch_size)
            ]
            texts = [
                tokenizer.apply_chat_template(
                    m, add_generation_prompt=True, tokenize=False, reasoning_effort="low"
                )
                for m in messages
            ]
            inputs = tokenizer(
                texts,
                return_tensors="pt",
                padding=True,
                padding_side="left",  # decoder-only generation expects left padding
            ).to("cuda:0")
            label = "time taken to generate"
            results.append(
                benchmark.Timer(
                    stmt="generate(model, model_inputs, max_new_tokens)",
                    setup="from __main__ import generate",
                    globals={"model": model, "model_inputs": inputs, "max_new_tokens": max_new_tokens},
                    num_threads=torch.get_num_threads(),
                    label=label,
                    sub_label=f"num tokens gen: {max_new_tokens} batch size: {batch_size}",
                    description=f"use kernels: {use_kernels}",
                ).timeit(5)
            )
            del inputs

        # Dropping the last reference frees the GPU memory; moving the model to
        # CPU first would only copy tens of GB to host RAM for no benefit.
        del model
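        # Assumption: explicitly emptying the CUDA cache between configurations
        # keeps the next model load from fragmenting GPU memory.
        torch.cuda.empty_cache()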

    # Print the comparison table: batch sizes as rows, kernels on/off as columns.
    compare = benchmark.Compare(results)
    compare.print()