# 32B-fp8 / recipe.yaml
# Uploaded via huggingface_hub by dphnAI (commit be9d4fd, verified)
---
quant_stage:
  quant_modifiers:
    # FP8-style quantization of all Linear layers: static per-channel 8-bit
    # float weights + dynamic per-token 8-bit float input activations
    # (llm-compressor / compressed-tensors recipe schema).
    QuantizationModifier:
      config_groups:
        group0:
          targets: [Linear]
          # Weights: static (dynamic: false) symmetric per-channel 8-bit
          # float; scales are calibrated with the MSE observer.
          weights:
            num_bits: 8
            type: float
            symmetric: true
            group_size: null
            strategy: channel
            block_structure: null
            dynamic: false
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: mse
            observer_kwargs: {}
          # Input activations: dynamic per-token symmetric 8-bit float —
          # scales are computed at runtime, so no calibration observer is set.
          input_activations:
            num_bits: 8
            type: float
            symmetric: true
            group_size: null
            strategy: token
            block_structure: null
            dynamic: true
            actorder: null
            scale_dtype: null
            zp_dtype: null
            observer: null
            observer_kwargs: {}
          # Output activations left unquantized.
          output_activations: null
          format: null
      targets: [Linear]
      # Skip the LM head and any module whose name ends in "gate"
      # (presumably MoE router gates — confirm against the model's
      # module names).
      ignore: [lm_head, 're:.*gate$']
      bypass_divisibility_checks: false