faster-transformers-scripts / benchmark-fa3.py
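"""Benchmark gpt-oss-20b generation with and without the FlashAttention-3 kernel.

Loads openai/gpt-oss-20b twice -- once with attn_implementation set to
"kernels-community/vllm-flash-attn3" and once with the default attention --
builds two long prompts from local transformers source files, and times greedy
generation of 256 new tokens with torch.utils.benchmark. Results are printed as
a benchmark.Compare table (see the sample output at the bottom of the file).
"""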
import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config
# ============ CONFIGURATION ============
MODEL_ID = "openai/gpt-oss-20b"
MAX_NEW_TOKENS = 256
BENCHMARK_RUNS = 5
SOURCE_FILES = [
"/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modeling_gpt_oss.py", # Fixed: missing comma
"/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modular_gpt_oss.py",
]
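# NOTE: SOURCE_FILES are local paths into a transformers checkout and are used
# only as long prompts (several thousand tokens of prefill each); point them at
# any large text files available on your machine.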
# ============ MODEL LOADING ============
def load_model(model_id: str, use_attn_kernels: bool):
"""Load model with optional attention kernel optimization."""
quantization_config = Mxfp4Config(dequantize=True)
kwargs = {
"dtype": "auto",
"device_map": "cuda:0",
"use_kernels": False,
"quantization_config": quantization_config,
}
if use_attn_kernels:
kwargs["attn_implementation"] = "kernels-community/vllm-flash-attn3"
return AutoModelForCausalLM.from_pretrained(model_id, **kwargs).eval()
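# NOTE: "kernels-community/vllm-flash-attn3" is resolved through the `kernels`
# package, which fetches the FlashAttention-3 kernel from the Hub; FA3 targets
# Hopper-class GPUs, so this path assumes compatible hardware is available.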
def unload_model(model):
"""Move model to CPU and free GPU memory."""
model.to("cpu")
del model
torch.cuda.empty_cache()
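# NOTE: torch.cuda.empty_cache() only returns PyTorch's cached blocks to the
# driver; the weights themselves are freed once the last reference (`model`) is
# dropped, which is why the model is moved to CPU and deleted first.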
# ============ GENERATION ============
def generate(model, model_inputs: dict, max_new_tokens: int):
"""Run inference without sampling."""
with torch.inference_mode():
model.generate(
**model_inputs,
do_sample=False,
temperature=None,
max_new_tokens=max_new_tokens,
eos_token_id=-1,
disable_compile=True,
)
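# NOTE: greedy decoding with eos_token_id=-1 means no run can stop early, so each
# benchmark iteration decodes exactly max_new_tokens tokens and timings stay
# comparable across prompts and attention backends; disable_compile=True keeps
# torch.compile warm-up out of the measurements.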
# ============ DATA PREPARATION ============
def load_prompts(filepaths: list[str]) -> list[str]:
"""Read source files and create summarization prompts."""
prompts = []
for filepath in filepaths:
with open(filepath, "r") as f:
prompts.append(f"{f.read()}\nSummarize this for me.")
return prompts
def tokenize_prompts(tokenizer, prompts: list[str]) -> list[tuple[dict, int]]:
"""Tokenize prompts and return inputs with their prefill sizes."""
tokenizer.padding_side = "left"
tokenized = []
for prompt in prompts:
message = [{"role": "user", "content": prompt}]
text = tokenizer.apply_chat_template(
message, # Fixed: was `m`
add_generation_prompt=True,
tokenize=False,
reasoning_effort="low",
)
inputs = tokenizer(text, return_tensors="pt", padding=True)
prefill_size = inputs.input_ids.size(1) # Fixed: was `input` and `.size[1]`
tokenized.append((inputs, prefill_size))
return tokenized
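# NOTE: reasoning_effort is a gpt-oss chat-template option ("low"/"medium"/"high")
# forwarded through apply_chat_template; padding_side="left" is the standard
# setting for decoder-only generation, though with one prompt per batch no
# padding is actually added here.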
# ============ BENCHMARKING ============
def run_benchmarks(model, tokenized_inputs: list[tuple], use_attn_kernels: bool) -> list:
"""Run timing benchmarks for each input."""
results = []
for inputs, prefill_size in tokenized_inputs:
timer = benchmark.Timer(
stmt="generate(model, model_inputs, max_new_tokens)",
setup="from __main__ import generate",
globals={
"model": model,
"model_inputs": inputs.to(model.device),
"max_new_tokens": MAX_NEW_TOKENS,
},
num_threads=torch.get_num_threads(),
label=f"Time to generate {MAX_NEW_TOKENS} tokens",
sub_label=f"prefill_size={prefill_size}",
description=f"attn_kernels={use_attn_kernels}",
)
results.append(timer.timeit(BENCHMARK_RUNS))
return results
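# NOTE: Timer.timeit(BENCHMARK_RUNS) executes the generate call BENCHMARK_RUNS
# times and returns a Measurement; label / sub_label / description determine how
# benchmark.Compare groups the rows and columns of the final table.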
# ============ MAIN ============
def main():
    prompts = load_prompts(SOURCE_FILES)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    tokenized_inputs = tokenize_prompts(tokenizer, prompts)
    all_results = []
    for use_attn_kernels in [True, False]:
        print(f"\nBenchmarking with attn_kernels={use_attn_kernels}...")
        model = load_model(MODEL_ID, use_attn_kernels)
        results = run_benchmarks(model, tokenized_inputs, use_attn_kernels)
        all_results.extend(results)
        unload_model(model)
    benchmark.Compare(all_results).print()
if __name__ == "__main__":
    main()
# [------------------ Time to generate 256 tokens -------------------]
#                          |  attn_kernels=True  |  attn_kernels=False
# 12 threads: ---------------------------------------------------------
#       prefill_size=7353  |         8.3         |         10.2
#       prefill_size=4225  |         8.3         |          9.0
#
# Times are in seconds (s).
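# In this run, the FA3 kernel makes 256-token generation roughly 8% faster at the
# shorter prefill (9.0 s -> 8.3 s) and roughly 19% faster at the longer one
# (10.2 s -> 8.3 s).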