"""Compare the GPU memory footprint of gpt-oss-20b loaded with its MXFP4
weights dequantized to the compute dtype versus kept natively quantized."""

import gc

import torch
from transformers import AutoModelForCausalLM, Mxfp4Config

MODEL_ID = "openai/gpt-oss-20b"
DEVICE = "cuda:0"

def get_used_gb():
    """Return (used, total) GPU memory in GiB for the current device."""
    free, total = torch.cuda.mem_get_info()
    return (total - free) / (1024**3), total / (1024**3)
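# Note: torch.cuda.mem_get_info() reports device-wide memory, so anything
# else running on this GPU shows up in "used" as well.
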
def clear_memory():
    """Drop throwaway globals and return cached GPU memory to the driver."""
    del_vars = [k for k in list(globals().keys()) if k.startswith("_tmp_")]
    for k in del_vars:
        globals().pop(k, None)
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.synchronize()

assert torch.cuda.is_available(), "CUDA is not available."

# Load with Mxfp4Config(dequantize=True) so the MXFP4 weights are unpacked
# into the compute dtype at load time.
clear_memory()
before_deq_used, total_gb = get_used_gb()
qconf = Mxfp4Config(dequantize=True)
model_deq = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map=DEVICE,
    quantization_config=qconf,
).eval()
after_deq_used, _ = get_used_gb()

# Free the dequantized model, then reload keeping the checkpoint's native
# MXFP4 quantization (the default when no quantization_config is passed).
del model_deq
clear_memory()
before_q_used, _ = get_used_gb()
model_q = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map=DEVICE,
).eval()
after_q_used, _ = get_used_gb()

| print(f"[dequantized] used before: {before_deq_used:.2f} GB, after: {after_deq_used:.2f} GB / total {total_gb:.2f} GB") |
| print(f"[quantized ] used before: {before_q_used:.2f} GB, after: {after_q_used:.2f} GB / total {total_gb:.2f} GB") |
|
|
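# Follow-up sketch: the per-load deltas isolate the memory each model added
# on top of its pre-load baseline (derived from the variables above).
deq_delta = after_deq_used - before_deq_used
q_delta = after_q_used - before_q_used
print(f"[delta      ] dequantized: {deq_delta:.2f} GB, quantized: {q_delta:.2f} GB")
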
# Keep the measurements around for later comparison.
mx_results = {
    "total_gb": total_gb,
    "after_dequantized_gb": after_deq_used,
    "after_quantized_gb": after_q_used,
}
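
# Optional sanity check (a sketch, not part of the measurements above): run a
# short generation with the still-loaded quantized model. Assumes AutoTokenizer
# provides the model's chat template; the prompt and token budget are arbitrary.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(MODEL_ID)
messages = [{"role": "user", "content": "In one word, what is 2 + 2?"}]
input_ids = tok.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(DEVICE)
with torch.no_grad():
    output_ids = model_q.generate(input_ids, max_new_tokens=16)
print(tok.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))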