File size: 3,361 Bytes
7971bee
f33dbc4
 
 
 
 
7971bee
f33dbc4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7971bee
f33dbc4
 
 
 
 
 
 
 
 
 
 
 
 
 
7971bee
 
f33dbc4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7971bee
f33dbc4
 
 
 
 
 
 
 
 
 
 
 
 
7971bee
 
 
 
 
 
f33dbc4
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
# Pin this process to GPU 3. NOTE(review): CUDA reads CUDA_VISIBLE_DEVICES when
# the CUDA context is initialized, so this must run before any torch CUDA call —
# keeping it above `import torch` is the safe convention.
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "3"

import torch
from torch.utils import benchmark
from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config

def load_model(use_kernels, model_id):
    """Load `model_id` onto cuda:0 in eval mode.

    Weights stored in MXFP4 are dequantized at load time; `use_kernels`
    toggles the optimized kernel code path in transformers.
    """
    quant_cfg = Mxfp4Config(dequantize=True)
    loaded = AutoModelForCausalLM.from_pretrained(
        model_id,
        dtype="auto",
        device_map="cuda:0",
        use_kernels=use_kernels,
        quantization_config=quant_cfg,
    )
    return loaded.eval()

def generate(model, model_inputs, max_new_tokens):
    """Run greedy decoding for exactly `max_new_tokens` tokens; output discarded.

    `eos_token_id=-1` disables early stopping so every run emits the same
    number of tokens, which keeps benchmark timings comparable across prompts.
    """
    decode_kwargs = dict(
        do_sample=False,
        temperature=None,
        max_new_tokens=max_new_tokens,
        eos_token_id=-1,
        disable_compile=True,
    )
    with torch.inference_mode():
        model.generate(**model_inputs, **decode_kwargs)

if __name__ == "__main__":
    model_id = "openai/gpt-oss-20b"
    results = []
    max_new_tokens = 256
    base_prompts = [
        "What is Tensor Parallelism?",
        "Explain machine learning fundamentals.",
        "How do neural networks work?",
        "What are the benefits of distributed computing?",
        "Describe the attention mechanism in transformers.",
        "What is gradient descent?",
        "How does backpropagation work?",
        "Explain the concept of overfitting.",
    ]

    # Load the tokenizer once, from the same id as the model: it is identical
    # for every configuration, and re-instantiating it inside the batch loop
    # (as well as hardcoding the id twice) wastes time and invites drift.
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Benchmark both kernel paths across several batch sizes.
    for use_kernels in [True, False]:
        model = load_model(use_kernels, model_id)
        for batch_size in [32, 64, 128]:
            # Cycle through the base prompts to fill the batch.
            messages = [
                [{"role": "system", "content": base_prompts[i % len(base_prompts)]}] for i in range(batch_size)
            ]
            texts = [tokenizer.apply_chat_template(m, add_generation_prompt=True, tokenize=False, reasoning_effort="low") for m in messages]
            # Left-padding keeps generated tokens contiguous at the right edge,
            # as required for batched decoder-only generation.
            inputs = tokenizer(
                texts,
                return_tensors="pt",
                padding=True,
                padding_side="left",
            ).to("cuda:0")

            label = "time taken to generate"
            results.append(
                benchmark.Timer(
                    stmt="generate(model, model_inputs, max_new_tokens)",
                    setup='from __main__ import generate',
                    globals={"model": model, "model_inputs": inputs, "max_new_tokens": max_new_tokens},
                    num_threads=torch.get_num_threads(),
                    label=label,
                    sub_label=f"num tokens gen: {max_new_tokens} batch size: {batch_size}",
                    description=f"use kernels: {use_kernels}"
                ).timeit(5)
            )
            # Drop the batch; no need to round-trip it through the CPU first.
            del inputs

        # Free the model's VRAM so the next configuration loads cleanly.
        model.to("cpu")
        del model
        torch.cuda.empty_cache()

    compare = benchmark.Compare(results)
    compare.print()


# [------------------------------ time taken to generate ------------------------------]
#                                            |  use kernels: True  |  use kernels: False
# 64 threads: --------------------------------------------------------------------------
#       num tokens gen: 256 batch size: 32   |         11.9        |         58.2       
#       num tokens gen: 256 batch size: 64   |         12.6        |        113.5       
#       num tokens gen: 256 batch size: 128  |         16.6        |        224.0       

# Times are in seconds (s).