default_stage:
  default_modifiers:
    QuantizationModifier:
      targets: [Linear]
      ignore: [lm_head, model.embed_tokens, 're:.*input_layernorm$', 're:.*post_attention_layernorm$', model.norm]
      scheme: FP8_DYNAMIC
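For reference, the same recipe can be applied programmatically. Below is a minimal sketch using llm-compressor's `QuantizationModifier` and `oneshot` entry point (recent versions export `oneshot` from the top-level package; older releases use `llmcompressor.transformers`). The model ID and output directory are placeholders, not part of the original recipe:

```python
# Sketch: applying the FP8_DYNAMIC recipe above with llm-compressor.
from transformers import AutoModelForCausalLM, AutoTokenizer

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier

MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct"  # placeholder model
SAVE_DIR = "model-FP8-Dynamic"                 # placeholder output path

model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Mirror the YAML recipe: quantize every Linear layer to FP8, skipping the
# lm_head, the token embeddings, and all normalization layers.
recipe = QuantizationModifier(
    targets=["Linear"],
    scheme="FP8_DYNAMIC",
    ignore=[
        "lm_head",
        "model.embed_tokens",
        "re:.*input_layernorm$",
        "re:.*post_attention_layernorm$",
        "model.norm",
    ],
)

# FP8_DYNAMIC uses static per-channel weight scales and dynamic per-token
# activation scales, so no calibration dataset is needed.
oneshot(model=model, recipe=recipe)

model.save_pretrained(SAVE_DIR, save_compressed=True)
tokenizer.save_pretrained(SAVE_DIR)
```

The resulting checkpoint stores the quantization config alongside the weights and can be served directly by vLLM.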