Qwen3.5-9B-OptiQ-4bit / optiq_metadata.json
Commit b5b6d4d: Strip vision/audio metadata (text-only OptIQ variant cleanup)
{
"method": "optiq_mixed_precision",
"target_bpw": 4.5,
"achieved_bpw": 4.5,
"n_high_bits": 92,
"n_low_bits": 157,
"threshold": 0.0,
"per_layer": {
"model.layers.0.linear_attn.out_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.0.linear_attn.in_proj_qkv": {
"bits": 8,
"group_size": 64
},
"model.layers.0.linear_attn.in_proj_z": {
"bits": 8,
"group_size": 64
},
"model.layers.0.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.0.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.0.mlp.gate_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.0.mlp.up_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.0.mlp.down_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.1.linear_attn.out_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.1.linear_attn.in_proj_qkv": {
"bits": 8,
"group_size": 64
},
"model.layers.1.linear_attn.in_proj_z": {
"bits": 8,
"group_size": 64
},
"model.layers.1.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.1.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.1.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.1.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.1.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.2.linear_attn.out_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.2.linear_attn.in_proj_qkv": {
"bits": 8,
"group_size": 64
},
"model.layers.2.linear_attn.in_proj_z": {
"bits": 8,
"group_size": 64
},
"model.layers.2.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.2.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.2.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.2.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.2.mlp.down_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.3.self_attn.q_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.3.self_attn.k_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.3.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.3.self_attn.o_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.3.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.3.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.3.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.4.linear_attn.out_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.4.linear_attn.in_proj_qkv": {
"bits": 8,
"group_size": 64
},
"model.layers.4.linear_attn.in_proj_z": {
"bits": 8,
"group_size": 64
},
"model.layers.4.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.4.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.4.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.4.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.4.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.5.linear_attn.out_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.5.linear_attn.in_proj_qkv": {
"bits": 8,
"group_size": 64
},
"model.layers.5.linear_attn.in_proj_z": {
"bits": 8,
"group_size": 64
},
"model.layers.5.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.5.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.5.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.5.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.5.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.6.linear_attn.out_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.6.linear_attn.in_proj_qkv": {
"bits": 8,
"group_size": 64
},
"model.layers.6.linear_attn.in_proj_z": {
"bits": 8,
"group_size": 64
},
"model.layers.6.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.6.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.6.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.6.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.6.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.7.self_attn.q_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.7.self_attn.k_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.7.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.7.self_attn.o_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.7.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.7.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.7.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.8.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.8.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.8.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.8.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.8.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.8.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.8.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.8.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.9.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.9.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.9.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.9.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.9.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.9.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.9.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.9.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.10.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.10.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.10.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.10.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.10.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.10.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.10.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.10.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.11.self_attn.q_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.11.self_attn.k_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.11.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.11.self_attn.o_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.11.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.11.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.11.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.12.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.12.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.12.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.12.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.12.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.12.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.12.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.12.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.13.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.13.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.13.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.13.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.13.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.13.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.13.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.13.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.14.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.14.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.14.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.14.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.14.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.14.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.14.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.14.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.15.self_attn.q_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.15.self_attn.k_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.15.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.15.self_attn.o_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.15.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.15.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.15.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.16.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.16.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.16.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.16.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.16.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.16.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.16.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.16.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.17.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.17.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.17.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.17.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.17.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.17.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.17.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.17.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.18.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.18.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.18.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.18.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.18.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.18.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.18.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.18.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.19.self_attn.q_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.19.self_attn.k_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.19.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.19.self_attn.o_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.19.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.19.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.19.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.20.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.20.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.20.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.20.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.20.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.20.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.20.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.20.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.21.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.21.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.21.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.21.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.21.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.21.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.21.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.21.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.22.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.22.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.22.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.22.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.22.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.22.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.22.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.22.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.23.self_attn.q_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.23.self_attn.k_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.23.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.23.self_attn.o_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.23.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.23.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.23.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.24.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.24.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.24.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.24.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.24.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.24.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.24.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.24.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.25.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.25.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.25.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.25.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.25.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.25.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.25.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.25.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.26.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.26.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.26.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.26.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.26.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.26.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.26.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.26.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.27.self_attn.q_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.27.self_attn.k_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.27.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.27.self_attn.o_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.27.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.27.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.27.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.28.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.28.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.28.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.28.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.28.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.28.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.28.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.28.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.29.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.29.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.29.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.29.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.29.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.29.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.29.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.29.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.30.linear_attn.out_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.30.linear_attn.in_proj_qkv": {
"bits": 4,
"group_size": 64
},
"model.layers.30.linear_attn.in_proj_z": {
"bits": 4,
"group_size": 64
},
"model.layers.30.linear_attn.in_proj_b": {
"bits": 8,
"group_size": 64
},
"model.layers.30.linear_attn.in_proj_a": {
"bits": 8,
"group_size": 64
},
"model.layers.30.mlp.gate_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.30.mlp.up_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.30.mlp.down_proj": {
"bits": 4,
"group_size": 64
},
"model.layers.31.self_attn.q_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.31.self_attn.k_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.31.self_attn.v_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.31.self_attn.o_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.31.mlp.gate_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.31.mlp.up_proj": {
"bits": 8,
"group_size": 64
},
"model.layers.31.mlp.down_proj": {
"bits": 8,
"group_size": 64
},
"lm_head": {
"bits": 4,
"group_size": 64
}
},
"post_processing": [
{
"op": "strip_vision",
"architectures": {
"from": [
"Qwen3_5ForConditionalGeneration"
],
"to": [
"Qwen3_5ForCausalLM"
]
},
"flattened_text_config": true,
"dropped_keys": [
"image_token_id",
"video_token_id",
"vision_end_token_id",
"vision_start_token_id"
]
}
]
}
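
A minimal sketch of how this metadata might be consumed, assuming the JSON above is saved locally as optiq_metadata.json (the file name comes from the page header); the loading and summary code is an illustrative assumption, not part of the OptIQ tooling. It re-derives the 8-bit / 4-bit counts from per_layer and checks them against the n_high_bits and n_low_bits summary fields, then breaks the allocation down by module type.

# Illustrative sketch (not from the original repository): summarize the
# per-layer bit allocation recorded in optiq_metadata.json.
import json
from collections import Counter

with open("optiq_metadata.json") as f:
    meta = json.load(f)

per_layer = meta["per_layer"]

# Count how many weight matrices were kept at each bit width.
bit_counts = Counter(cfg["bits"] for cfg in per_layer.values())
print(bit_counts)  # should match the n_high_bits / n_low_bits fields above

# Sanity-check against the summary fields stored in the same file.
assert bit_counts[8] == meta["n_high_bits"]
assert bit_counts[4] == meta["n_low_bits"]

# Break the allocation down by module type (linear_attn, self_attn, mlp, lm_head).
by_module = Counter()
for name, cfg in per_layer.items():
    kind = next((k for k in ("linear_attn", "self_attn", "mlp") if f".{k}." in name), name)
    by_module[(kind, cfg["bits"])] += 1
for (kind, bits), n in sorted(by_module.items()):
    print(f"{kind:12s} {bits}-bit: {n}")

The printed breakdown is consistent with the mixed-precision scheme the method field names: the earliest layers, the self-attention v_proj (and several o_proj) matrices, and the linear-attention in_proj_a / in_proj_b gates stay at 8 bits, while most MLP weights and the lm_head drop to 4 bits, which is how the file reports reaching the 4.5 achieved_bpw target.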