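"""Benchmark gpt-oss-20b generation speed with and without the vLLM FlashAttention-3 kernel.

For each attention backend the script loads openai/gpt-oss-20b, generates a fixed number of
tokens from long source-file prompts, and prints a torch.utils.benchmark comparison table
(sample output is included as comments at the bottom of the file).
"""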
import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config

# ============ CONFIGURATION ============
MODEL_ID = "openai/gpt-oss-20b"
MAX_NEW_TOKENS = 256
BENCHMARK_RUNS = 5

SOURCE_FILES = [
    "/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modeling_gpt_oss.py",  # Fixed: missing comma
    "/fsx/aritra/git-repos/transformers/src/transformers/models/gpt_oss/modular_gpt_oss.py",
]


# ============ MODEL LOADING ============
def load_model(model_id: str, use_attn_kernels: bool):
    """Load model with optional attention kernel optimization."""
    quantization_config = Mxfp4Config(dequantize=True)
    
    kwargs = {
        "dtype": "auto",
        "device_map": "cuda:0",
        "use_kernels": False,
        "quantization_config": quantization_config,
    }
    
    if use_attn_kernels:
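        # Swap in the FlashAttention-3 kernel from the kernels-community Hub as the attention backend.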
        kwargs["attn_implementation"] = "kernels-community/vllm-flash-attn3"

    return AutoModelForCausalLM.from_pretrained(model_id, **kwargs).eval()


def unload_model(model):
    """Move model to CPU and free GPU memory."""
    model.to("cpu")
    del model
    torch.cuda.empty_cache()


# ============ GENERATION ============
def generate(model, model_inputs: dict, max_new_tokens: int):
    """Run inference without sampling."""
    with torch.inference_mode():
        model.generate(
            **model_inputs,
            do_sample=False,
            temperature=None,
            max_new_tokens=max_new_tokens,
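            # eos_token_id=-1 never matches a real token, so every run emits exactly max_new_tokens tokens.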
            eos_token_id=-1,
            disable_compile=True,
        )


# ============ DATA PREPARATION ============
def load_prompts(filepaths: list[str]) -> list[str]:
    """Read source files and create summarization prompts."""
    prompts = []
    for filepath in filepaths:
        with open(filepath, "r") as f:
            prompts.append(f"{f.read()}\nSummarize this for me.")
    return prompts


def tokenize_prompts(tokenizer, prompts: list[str]) -> list[tuple[dict, int]]:
    """Tokenize prompts and return inputs with their prefill sizes."""
    tokenizer.padding_side = "left"
    tokenized = []

    for prompt in prompts:
        message = [{"role": "user", "content": prompt}]
        text = tokenizer.apply_chat_template(
            message,
            add_generation_prompt=True,
            tokenize=False,
            reasoning_effort="low",
        )
        inputs = tokenizer(text, return_tensors="pt", padding=True)
        prefill_size = inputs.input_ids.size(1)
        tokenized.append((inputs, prefill_size))

    return tokenized


# ============ BENCHMARKING ============
def run_benchmarks(model, tokenized_inputs: list[tuple], use_attn_kernels: bool) -> list:
    """Run timing benchmarks for each input."""
    results = []

    for inputs, prefill_size in tokenized_inputs:
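        # torch.utils.benchmark.Timer handles CUDA synchronization and warmup; stmt runs in a
        # namespace built from `setup` and `globals`, so `generate` is imported there explicitly.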
        timer = benchmark.Timer(
            stmt="generate(model, model_inputs, max_new_tokens)",
            setup="from __main__ import generate",
            globals={
                "model": model,
                "model_inputs": inputs.to(model.device),
                "max_new_tokens": MAX_NEW_TOKENS,
            },
            num_threads=torch.get_num_threads(),
            label=f"Time to generate {MAX_NEW_TOKENS} tokens",
            sub_label=f"prefill_size={prefill_size}",
            description=f"attn_kernels={use_attn_kernels}",
        )
        results.append(timer.timeit(BENCHMARK_RUNS))

    return results


# ============ MAIN ============
def main():
    prompts = load_prompts(SOURCE_FILES)

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    tokenized_inputs = tokenize_prompts(tokenizer, prompts)

    all_results = []
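    # Load a fresh model per attention backend and free GPU memory between configurations.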
    for use_attn_kernels in [True, False]:
        print(f"\nBenchmarking with attn_kernels={use_attn_kernels}...")

        model = load_model(MODEL_ID, use_attn_kernels)
        results = run_benchmarks(model, tokenized_inputs, use_attn_kernels)
        all_results.extend(results)
        unload_model(model)

    benchmark.Compare(all_results).print()


if __name__ == "__main__":
    main()

# [------------------ Time to generate 256 tokens -------------------]
#                          |  attn_kernels=True  |  attn_kernels=False
# 12 threads: --------------------------------------------------------
#       prefill_size=7353  |         8.3         |         10.2       
#       prefill_size=4225  |         8.3         |          9.0       

# Times are in seconds (s).