Update benchmark-kernels-with-without.py
benchmark-kernels-with-without.py
CHANGED
@@ -1,11 +1,10 @@
-import os; os.environ["CUDA_VISIBLE_DEVICES"]="
+import os; os.environ["CUDA_VISIBLE_DEVICES"]="3"
 
 import torch
 from torch.utils import benchmark
 from transformers import AutoTokenizer, AutoModelForCausalLM, Mxfp4Config
 
-def load_model(use_kernels):
-    model_id = "openai/gpt-oss-20b"
+def load_model(use_kernels, model_id):
     quantization_config = Mxfp4Config(dequantize=True)
     model = AutoModelForCausalLM.from_pretrained(
         model_id,
@@ -28,9 +27,9 @@ def generate(model, model_inputs, max_new_tokens):
     )
 
 if __name__ == "__main__":
+    model_id = "openai/gpt-oss-20b"
     results = []
     max_new_tokens = 256
-    batch_size = 256
     base_prompts = [
         "What is Tensor Parallelism?",
         "Explain machine learning fundamentals.",
@@ -43,8 +42,8 @@ if __name__ == "__main__":
     ]
 
     for use_kernels in [True, False]:
-        model = load_model(use_kernels)
-        for batch_size in [32, 64, 128, 256]:
+        model = load_model(use_kernels, model_id)
+        for batch_size in [32, 64, 128]:
             messages = [
                 [{"role": "system", "content": base_prompts[i % len(base_prompts)]}] for i in range(batch_size)
             ]
@@ -65,7 +64,7 @@ if __name__ == "__main__":
                     globals={"model": model, "model_inputs": inputs, "max_new_tokens": max_new_tokens},
                     num_threads=torch.get_num_threads(),
                     label=label,
-                    sub_label=f"num tokens: {max_new_tokens} batch size: {batch_size}",
+                    sub_label=f"num tokens gen: {max_new_tokens} batch size: {batch_size}",
                     description=f"use kernels: {use_kernels}"
                 ).timeit(5)
             )
@@ -79,12 +78,11 @@ if __name__ == "__main__":
     compare.print()
 
 
-# [---------------------------- time taken to generate ----------------------------]
-#
-#
-# num tokens: 256 batch size: 32  |
-# num tokens: 256 batch size: 64  | 12.
-# num tokens: 256 batch size: 128 |
-# num tokens: 256 batch size: 256 | 15.0 | 21.2
+# [------------------------------ time taken to generate ------------------------------]
+#                                            |  use kernels: True  |  use kernels: False
+# 64 threads: --------------------------------------------------------------------------
+#      num tokens gen: 256 batch size: 32    |        11.9         |        58.2
+#      num tokens gen: 256 batch size: 64    |        12.6         |       113.5
+#      num tokens gen: 256 batch size: 128   |        16.6         |       224.0
 
 # Times are in seconds (s).
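Most of load_model's body is elided from this diff. For readers reproducing the benchmark, a plausible reconstruction from the visible fragments is sketched below; everything past the quantization_config line is an assumption rather than code confirmed by this commit. In particular, the use_kernels kwarg (accepted by recent transformers releases in from_pretrained to swap in optimized kernels from the Hub) is one likely way the flag reaches the model, but the elided lines may do it differently.

from transformers import AutoModelForCausalLM, Mxfp4Config

def load_model(use_kernels, model_id):
    # Visible in the diff: dequantize the MXFP4 checkpoint at load time.
    quantization_config = Mxfp4Config(dequantize=True)
    # The kwargs below are assumptions -- the diff elides these lines.
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        quantization_config=quantization_config,
        torch_dtype="auto",        # assumption: not shown in the diff
        device_map="auto",         # assumption: not shown in the diff
        use_kernels=use_kernels,   # assumption: how the flag likely reaches transformers
    )
    return model.eval()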
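The timing harness itself follows the standard torch.utils.benchmark comparison pattern: each configuration is timed by a Timer whose label, sub_label, and description become the table title, row header, and column header that Compare prints. Below is a minimal self-contained sketch of that pattern, with a trivial matmul standing in for the script's generate helper (which, along with the tokenization step, is elided from this diff):

import torch
from torch.utils import benchmark

def workload(a, b):
    # Stand-in for generate(model, model_inputs, max_new_tokens).
    return a @ b

results = []
for use_kernels in [True, False]:        # outer loop, as in the script
    for batch_size in [32, 64, 128]:     # inner loop, as in the script
        a = torch.randn(batch_size, 512)
        b = torch.randn(512, 512)
        results.append(
            benchmark.Timer(
                stmt="workload(a, b)",
                globals={"workload": workload, "a": a, "b": b},
                num_threads=torch.get_num_threads(),
                label="time taken to generate",             # table title
                sub_label=f"batch size: {batch_size}",      # row header
                description=f"use kernels: {use_kernels}",  # column header
            ).timeit(5)  # runs the stmt 5 times, returns a Measurement
        )

benchmark.Compare(results).print()

Compare groups measurements that share a label and emits one column per distinct description, which is how the "use kernels: True" / "use kernels: False" columns in the results table above are produced.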