import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config

MODEL_ID = "openai/gpt-oss-20b"
MAX_NEW_TOKENS = 256
BENCHMARK_RUNS = 5

SOURCE_FILES = [
    "/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modeling_gpt_oss.py",
    "/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modular_gpt_oss.py",
]
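# NOTE: the SOURCE_FILES paths above come from the original author's machine;
# point them at any long local files you want to use as large prompts.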


def load_model(model_id: str, use_attn_kernels: bool):
    """Load model with optional attention kernel optimization."""
    # Dequantize the MXFP4 checkpoint so both configurations run the same dense weights.
    quantization_config = Mxfp4Config(dequantize=True)

    kwargs = {
        "dtype": "auto",
        "device_map": "cuda:0",
        # Keep generic Hub kernels off so the only difference between the two
        # runs is the attention implementation.
        "use_kernels": False,
        "quantization_config": quantization_config,
    }

    # Either the default attention path or a FlashAttention-3 kernel pulled from the Hub.
    if use_attn_kernels:
        kwargs["attn_implementation"] = "kernels-community/vllm-flash-attn3"

    return AutoModelForCausalLM.from_pretrained(model_id, **kwargs).eval()


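# Optional helper, not used by the benchmark itself: report which attention
# implementation the loaded model actually ended up with. `_attn_implementation`
# is an internal transformers attribute, so treat this as a debugging aid
# rather than a stable API.
def report_attn_implementation(model):
    print("attention implementation:", model.config._attn_implementation)

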
def unload_model(model):
    """Move model to CPU and free GPU memory."""
    model.to("cpu")
    del model
    torch.cuda.empty_cache()


def generate(model, model_inputs: dict, max_new_tokens: int):
    """Run inference without sampling."""
    with torch.inference_mode():
        model.generate(
            **model_inputs,
            do_sample=False,
            temperature=None,
            max_new_tokens=max_new_tokens,
            # An impossible EOS id prevents early stopping, so every timed run
            # generates exactly max_new_tokens tokens.
            eos_token_id=-1,
            # Keep torch.compile out of the picture so compilation time does
            # not leak into the measurements.
            disable_compile=True,
        )


def load_prompts(filepaths: list[str]) -> list[str]:
    """Read source files and create summarization prompts."""
    prompts = []
    for filepath in filepaths:
        with open(filepath, "r") as f:
            prompts.append(f"{f.read()}\nSummarize this for me.")
    return prompts


def tokenize_prompts(tokenizer, prompts: list[str]) -> list[tuple[dict, int]]:
    """Tokenize prompts and return inputs with their prefill sizes."""
    tokenizer.padding_side = "left"
    tokenized = []

    for prompt in prompts:
        message = [{"role": "user", "content": prompt}]
        # `reasoning_effort` is forwarded to the gpt-oss chat template.
        text = tokenizer.apply_chat_template(
            message,
            add_generation_prompt=True,
            tokenize=False,
            reasoning_effort="low",
        )
        inputs = tokenizer(text, return_tensors="pt", padding=True)
        prefill_size = inputs.input_ids.size(1)
        tokenized.append((inputs, prefill_size))

    return tokenized


def run_benchmarks(model, tokenized_inputs: list[tuple], use_attn_kernels: bool) -> list:
    """Run timing benchmarks for each input."""
    results = []

    for inputs, prefill_size in tokenized_inputs:
        timer = benchmark.Timer(
            stmt="generate(model, model_inputs, max_new_tokens)",
            setup="from __main__ import generate",
            globals={
                "model": model,
                "model_inputs": inputs.to(model.device),
                "max_new_tokens": MAX_NEW_TOKENS,
            },
            num_threads=torch.get_num_threads(),
            label=f"Time to generate {MAX_NEW_TOKENS} tokens",
            sub_label=f"prefill_size={prefill_size}",
            description=f"attn_kernels={use_attn_kernels}",
        )
        # timeit() times BENCHMARK_RUNS calls (after a brief internal warm-up)
        # and returns a Measurement that Compare aggregates later.
        results.append(timer.timeit(BENCHMARK_RUNS))

    return results


def main():
    prompts = load_prompts(SOURCE_FILES)

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    tokenized_inputs = tokenize_prompts(tokenizer, prompts)

    all_results = []
    for use_attn_kernels in [True, False]:
        print(f"\nBenchmarking with attn_kernels={use_attn_kernels}...")

        # Reload the model for each configuration so the two runs are isolated.
        model = load_model(MODEL_ID, use_attn_kernels)
        results = run_benchmarks(model, tokenized_inputs, use_attn_kernels)
        all_results.extend(results)
        unload_model(model)

    # One row per prefill size, one column per attn_kernels setting.
    benchmark.Compare(all_results).print()


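# Example invocation (the filename is arbitrary). Assumes a CUDA GPU, a
# transformers version with gpt-oss / MXFP4 support, and the `kernels`
# package so the Hub attention implementation can be fetched:
#
#   python benchmark_gpt_oss_attention.py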
if __name__ == "__main__":
    main()