# q-vdit-quant-output / config.yaml
# (uploaded by samuelt0207 via huggingface_hub, revision fffc2b2)
---
# Quantization configuration for the Wan2.2 I2V model.
# W4A4 NVFP4: 4-bit FP4 (E2M1) weights and activations using the NVFP4 format.
#
# Full Q-VDiT approach:
# - PTQ: quantize ALL Linear layers with NVFP4 weights (per-channel) and activations (per-token)
# - TQE: Token-aware Quantization Estimator (LoRA-like correction, already in QuantLayer)
# - TMD: Temporal Maintenance Distillation (KL divergence on frame similarity)

# Layers to keep in full precision (input/output projections + first/last blocks).
part_fp_list: './t2v/configs/quant/wan/remain_fp.txt'
# Model identification.
model:
  model_id: 'wan_i2v_14b'
  model_type: 'wan'
  # Conditional generation flag.
  conditional: true  # canonical lowercase boolean (was `True`, a YAML 1.1 truthy)
# Calibration data settings (batch_size reduced for Wan 14B due to memory constraints).
calib_data:
  path: null       # set via command line
  n_steps: 10      # number of timesteps used for calibration
  batch_size: 1    # reduced from 4 for the Wan 14B model (saves ~60GB GPU memory)
  n_samples: 3     # number of samples per timestep
# Quantization settings.
quant:
  # Weight quantization: NVFP4 E2M1, static per-channel scales.
  weight:
    quantizer:
      quant_type: 'nvfp4'   # NVFP4 E2M1 floating-point quantizer
      n_bits: 4
      per_group: 'channel'  # per-channel quantization for weights
      scale_method: 'absmax'
    optimization:
      iters: 1000      # increased from 200 - enough iterations for LoRA/delta convergence
      use_grad: false  # canonical lowercase boolean (was `False`)
      loss:
        # TMD (Temporal Maintenance Distillation) - frame-wise similarity preservation.
        # Works with Wan's 5D latent tensors [B,C,T,H,W].
        reconstruction_loss_type: 'relation'
        lambda_coeff: 1.0
        b_range: [10, 2]
        warmup: 0.0
        decay_start: 0.0
        p: 2.0
      params:
        delta:
          # Q-VDiT: 1e-6 for weight scale params.
          # Written as 1.0e-6: `1.e-6` has an empty fraction part and is resolved
          # as a *string* (not a float) by some YAML loaders.
          lr: 1.0e-6
# Activation quantization (NVFP4 E2M1 dynamic per-token)
activation:
quantizer:
quant_type: 'nvfp4' # Use NVFP4 E2M1 floating-point quantizer
n_bits: 4
per_group: 'token' # Per-token quantization for activations
dynamic: True # Dynamic quantization (compute scale on-the-fly with STE)
scale_method: 'absmax'
# Token count configuration for Wan model
n_tokens: 5120 # Combined spatial-temporal tokens
n_text_tokens: 512 # Text encoder sequence length
n_image_tokens: 257 # CLIP image encoder (256 patches + 1 CLS)
# Smooth quantization settings
# NOTE: Using single timerange is more robust for Wan2.2 since each transformer
# only sees specific timestep ranges. Multiple timeranges risk uncovered ranges.
smooth_quant:
enable: True
channel_wise_scale_type: 'momentum_act_max'
momentum: 0.95
alpha: [0.11] # Single alpha for single timerange
timerange: [[0, 1000]] # Single timerange - simpler and more robust
# TQE (Token-aware Quantization Estimator) parameters
# LoRA-like low-rank correction already implemented in QuantLayer
tqe:
lr: 1.e-5 # Q-VDiT: 1e-5 for TQE (LoRA) params
# Memory optimization: process layers in batches during M initialization
layer_batch_size: 50 # Number of layers to process at once (reduces peak memory)
# Optional: filter layers for TQE M initialization
# 'attention' = only init M for attention layers (.attn1., .attn2.)
# null = init M for all QuantLayers (default)
layer_filter: 'attention' # Reduces from ~1200 layers to ~240 layers
# Gradient checkpointing (recommended for the 14B model).
grad_checkpoint: true  # canonical lowercase boolean (was `True`)

# Timestep-wise quantization (optional).
timestep_wise: false  # was `False`

# CFG (Classifier-Free Guidance) split handling.
cfg_split: false  # was `False`