"""
Quick loader for INT8 quantized HunyuanImage-3.0-Instruct model.
Generated automatically by hunyuan_quantize_instruct_int8.py
"""
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
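
# Note: 8-bit loading requires the bitsandbytes and accelerate packages
# to be installed alongside transformers.
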
def load_quantized_instruct_int8(model_path=r"H:\Testing\HunyuanImage-3.0-Instruct-INT8"):
    """Load the INT8 quantized HunyuanImage-3.0-Instruct model."""
    quant_config = BitsAndBytesConfig(
        load_in_8bit=True,
        # Outlier threshold for LLM.int8() mixed-precision decomposition;
        # 6.0 is the bitsandbytes default.
        llm_int8_threshold=6.0,
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        quantization_config=quant_config,
        device_map="auto",        # shard across available GPUs/CPU automatically
        trust_remote_code=True,   # HunyuanImage-3.0 ships custom modeling code
        torch_dtype=torch.bfloat16,
        attn_implementation="sdpa",
    )
    # Load the tokenizer via the helper provided by the model's remote code.
    model.load_tokenizer(model_path)
    return model

if __name__ == "__main__":
print("Loading INT8 quantized Instruct model...")
model = load_quantized_instruct_int8()
print("Model loaded successfully!")
print(f"Device map: {model.hf_device_map}")
if torch.cuda.is_available():
print(f"GPU memory allocated: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
print(f"GPU memory reserved: {torch.cuda.memory_reserved() / 1024**3:.2f} GB")