samuelt0207 commited on
Commit
fffc2b2
·
verified ·
1 Parent(s): d8e5720

Upload folder using huggingface_hub

Browse files
Files changed (7) hide show
  1. ckpt_t1.pth +3 -0
  2. ckpt_t1_weight_opt.pth +3 -0
  3. ckpt_t2.pth +3 -0
  4. ckpt_t2_weight_opt.pth +3 -0
  5. config.yaml +92 -0
  6. metadata.pt +3 -0
  7. run.log +0 -0
ckpt_t1.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c915a10af6818c4c6e7c572c2626fe6d03ef18eb5b142ffd29b3d3191f85a2c
3
+ size 653284374
ckpt_t1_weight_opt.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b32b2b33190fc998c89c75fa43e605707bd6b4b9c9c0f2d8e83f985a3ea3a0d9
3
+ size 653314366
ckpt_t2.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8099111d23f4d8b1747cc3e2a2e9c0cd8d32dc8f6e19bf55c9cfbb8c7ed4e48
3
+ size 653284374
ckpt_t2_weight_opt.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba1a840b9e15fbbd90665733d93162d3824afda055dd2f9a20dbb14d8b5988df
3
+ size 653314366
config.yaml ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Quantization configuration for Wan2.2 I2V model
2
+ # W4A4 NVFP4: 4-bit FP4 (E2M1) weights and activations using NVFP4 format
3
+ #
4
+ # Full Q-VDiT approach:
5
+ # - PTQ: Quantize ALL Linear layers with NVFP4 weights (per-channel) and activations (per-token)
6
+ # - TQE: Token-aware Quantization Estimator (LoRA-like correction, already in QuantLayer)
7
+ # - TMD: Temporal Maintenance Distillation (KL divergence on frame similarity)
8
+
9
+ # Layers to keep in full precision (input/output projections + first/last blocks)
10
+ part_fp_list: "./t2v/configs/quant/wan/remain_fp.txt"
11
+
12
+ # Model identification
13
+ model:
14
+ model_id: "wan_i2v_14b"
15
+ model_type: 'wan'
16
+
17
+ # Conditional generation flag
18
+ conditional: true
19
+
20
+ # Calibration data settings (reduced batch_size for Wan 14B due to memory constraints)
21
+ calib_data:
22
+ path: null # Set via command line
23
+ n_steps: 10 # Number of timesteps to use for calibration
24
+ batch_size: 1 # Reduced from 4 for Wan 14B model (saves ~60GB GPU memory)
25
+ n_samples: 3 # Number of samples per timestep
26
+
27
+ # Quantization settings
28
+ quant:
29
+ # Weight quantization (NVFP4 E2M1 per-channel)
30
+ weight:
31
+ quantizer:
32
+ quant_type: 'nvfp4' # Use NVFP4 E2M1 floating-point quantizer
33
+ n_bits: 4
34
+ per_group: 'channel' # Per-channel quantization for weights
35
+ scale_method: 'absmax'
36
+ optimization:
37
+ iters: 1000 # Increased from 200 - sufficient iterations for LoRA/delta convergence
38
+ use_grad: false
39
+ loss:
40
+ # TMD (Temporal Maintenance Distillation) - uses frame-wise similarity preservation
41
+ # Works with Wan's 5D latent tensors [B,C,T,H,W]
42
+ reconstruction_loss_type: 'relation'
43
+ lambda_coeff: 1.0
44
+ b_range: [10, 2]
45
+ warmup: 0.0
46
+ decay_start: 0.0
47
+ p: 2.0
48
+ params:
49
+ delta:
50
+ lr: 1.0e-6 # Q-VDiT: 1e-6 for weight scale params
51
+
52
+ # Activation quantization (NVFP4 E2M1 dynamic per-token)
53
+ activation:
54
+ quantizer:
55
+ quant_type: 'nvfp4' # Use NVFP4 E2M1 floating-point quantizer
56
+ n_bits: 4
57
+ per_group: 'token' # Per-token quantization for activations
58
+ dynamic: true # Dynamic quantization (compute scale on-the-fly with STE)
59
+ scale_method: 'absmax'
60
+ # Token count configuration for Wan model
61
+ n_tokens: 5120 # Combined spatial-temporal tokens
62
+ n_text_tokens: 512 # Text encoder sequence length
63
+ n_image_tokens: 257 # CLIP image encoder (256 patches + 1 CLS)
64
+ # Smooth quantization settings
65
+ # NOTE: Using single timerange is more robust for Wan2.2 since each transformer
66
+ # only sees specific timestep ranges. Multiple timeranges risk uncovered ranges.
67
+ smooth_quant:
68
+ enable: true
69
+ channel_wise_scale_type: 'momentum_act_max'
70
+ momentum: 0.95
71
+ alpha: [0.11] # Single alpha for single timerange
72
+ timerange: [[0, 1000]] # Single timerange - simpler and more robust
73
+
74
+ # TQE (Token-aware Quantization Estimator) parameters
75
+ # LoRA-like low-rank correction already implemented in QuantLayer
76
+ tqe:
77
+ lr: 1.0e-5 # Q-VDiT: 1e-5 for TQE (LoRA) params
78
+ # Memory optimization: process layers in batches during M initialization
79
+ layer_batch_size: 50 # Number of layers to process at once (reduces peak memory)
80
+ # Optional: filter layers for TQE M initialization
81
+ # 'attention' = only init M for attention layers (.attn1., .attn2.)
82
+ # null = init M for all QuantLayers (default)
83
+ layer_filter: 'attention' # Reduces from ~1200 layers to ~240 layers
84
+
85
+ # Gradient checkpointing (recommended for 14B model)
86
+ grad_checkpoint: true
87
+
88
+ # Timestep-wise quantization (optional)
89
+ timestep_wise: false
90
+
91
+ # CFG (Classifier-Free Guidance) split handling
92
+ cfg_split: false
metadata.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c21b58d4a3c2cd1170769d839d2083c1296c2bccf8a5e8a0a726e3de11f55937
3
+ size 2020
run.log ADDED
The diff for this file is too large to render. See raw diff