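"""Helpers for selecting and reporting the attention implementation used by a model."""
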
from typing import TYPE_CHECKING

from ...extras import logging
from ...extras.constants import AttentionFunction


if TYPE_CHECKING:
    from transformers import PretrainedConfig

    from ...hparams import ModelArguments


logger = logging.get_logger(__name__)


def configure_attn_implementation(config: "PretrainedConfig", model_args: "ModelArguments") -> None: |
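    """Set the attention implementation requested in `model_args` on the model config.

    May also mutate `model_args.flash_attn` (e.g., Gemma 2 is forced to FlashAttention-2 when available).
    """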
    from transformers.utils import is_flash_attn_2_available

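    # Gemma 2 uses attention logit soft-capping, so prefer FlashAttention-2 (or eager) over SDPA.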
    if getattr(config, "model_type", None) == "gemma2":
        if model_args.flash_attn in (AttentionFunction.AUTO, AttentionFunction.FA2):
            if is_flash_attn_2_available():
                if model_args.flash_attn != AttentionFunction.FA2:
                    logger.warning_rank0("Gemma 2 should use FlashAttention-2, changing `flash_attn` to fa2.")

                model_args.flash_attn = AttentionFunction.FA2
            else:
                logger.warning_rank0("FlashAttention-2 is not installed, falling back to eager attention.")
                model_args.flash_attn = AttentionFunction.DISABLED
        elif model_args.flash_attn == AttentionFunction.SDPA:
            logger.warning_rank0("Gemma 2 requires soft-capping attention, which SDPA does not support.")

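    # AUTO: keep the config untouched and let transformers pick its default implementation.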
    if model_args.flash_attn == AttentionFunction.AUTO:
        return
    elif model_args.flash_attn == AttentionFunction.DISABLED:
        requested_attn_implementation = "eager"
    elif model_args.flash_attn == AttentionFunction.SDPA:
        from transformers.utils import is_torch_sdpa_available

        if not is_torch_sdpa_available():
            logger.warning_rank0("torch>=2.1.1 is required for SDPA attention.")
            return

        requested_attn_implementation = "sdpa"
    elif model_args.flash_attn == AttentionFunction.FA2:
        if not is_flash_attn_2_available():
            logger.warning_rank0("FlashAttention-2 is not installed.")
            return

        requested_attn_implementation = "flash_attention_2"
    else:
        raise NotImplementedError(f"Unknown attention type: {model_args.flash_attn}")

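    # Some remote-code models expose the attention switch under non-standard attribute names.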
    if getattr(config, "model_type", None) == "internlm2":
        setattr(config, "attn_implementation", requested_attn_implementation)
    elif getattr(config, "model_type", None) == "kimi_vl":
        setattr(config.vision_config, "_attn_implementation", requested_attn_implementation)
        setattr(config.text_config, "_attn_implementation", requested_attn_implementation)
    else:
        setattr(config, "_attn_implementation", requested_attn_implementation)

|
def print_attn_implementation(config: "PretrainedConfig") -> None: |
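    """Log the attention implementation that the model config ends up using."""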
    if getattr(config, "model_type", None) == "internlm2":
        attn_implementation = getattr(config, "attn_implementation", None)
    else:
        attn_implementation = getattr(config, "_attn_implementation", None)

    if attn_implementation == "flash_attention_2":
        logger.info_rank0("Using FlashAttention-2 for faster training and inference.")
    elif attn_implementation == "sdpa":
        logger.info_rank0("Using torch SDPA for faster training and inference.")
    else:
        logger.info_rank0("Using vanilla attention implementation.")