"""

Quick loader for INT8 quantized HunyuanImage-3.0-Instruct-Distil model.

Generated automatically by hunyuan_quantize_instruct_distil_int8.py



This model is optimized for fast inference:

- CFG distillation: No classifier-free guidance needed

- Meanflow: Improved sampling

- Only 13B active params despite 80B total (MoE)

"""

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

def load_quantized_instruct_distil_int8(model_path=r"H:\Testing\HunyuanImage-3.0-Instruct-Distil-INT8"):
    """Load the INT8 quantized HunyuanImage-3.0-Instruct-Distil model."""
    
    # LLM.int8() quantization: weights are stored in int8, while outlier
    # activations above the threshold stay in higher precision.
    quant_config = BitsAndBytesConfig(
        load_in_8bit=True,
        llm_int8_threshold=6.0,  # bitsandbytes' default outlier threshold
    )
    
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        quantization_config=quant_config,
        device_map="auto",            # shard layers across available devices
        trust_remote_code=True,       # HunyuanImage-3.0 ships custom modeling code
        torch_dtype=torch.bfloat16,   # dtype for the non-quantized modules
        attn_implementation="sdpa",   # PyTorch scaled-dot-product attention
    )
    
    # HunyuanImage-3.0's remote modeling code exposes load_tokenizer()
    model.load_tokenizer(model_path)
    
    return model
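
# Usage sketch: the upstream HunyuanImage-3.0 model card documents a
# `generate_image(prompt=..., stream=True)` method on the remote-code model.
# The exact signature may differ across model revisions, so treat this
# helper as an illustrative sketch rather than a guaranteed API.
def generate_sample_image(model, prompt, output_path="sample_int8.png"):
    """Generate one image with the distilled model and save it (hedged sketch)."""
    image = model.generate_image(prompt=prompt, stream=True)
    image.save(output_path)
    return output_path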

if __name__ == "__main__":
    print("Loading INT8 quantized Instruct-Distil model...")
    model = load_quantized_instruct_distil_int8()
    print("Model loaded successfully!")
    print(f"Device map: {model.hf_device_map}")
    
    if torch.cuda.is_available():
        print(f"GPU memory allocated: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
        print(f"GPU memory reserved: {torch.cuda.memory_reserved() / 1024**3:.2f} GB")