import os

import pytest
from transformers.utils import is_flash_attn_2_available

try:
    from transformers.utils import is_torch_sdpa_available
except ImportError:
    # Fallback for transformers versions that no longer ship `is_torch_sdpa_available`;
    # on those versions SDPA support is assumed to be available.
    def is_torch_sdpa_available():
        return True

from llamafactory.extras.packages import is_transformers_version_greater_than
from llamafactory.train.test_utils import load_infer_model

TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")

INFER_ARGS = {
    "model_name_or_path": TINY_LLAMA3,
    "template": "llama3",
}

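# `test_attention` loads the tiny Llama-3 model once per available attention backend and
# asserts that every attention module uses the class expected for that backend.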
@pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
def test_attention():
    # Collect the attention implementations usable in the current environment.
    attention_available = ["disabled"]
    if is_torch_sdpa_available():
        attention_available.append("sdpa")

    if is_flash_attn_2_available():
        attention_available.append("fa2")

    # Expected Llama attention class name for each requested implementation.
    llama_attention_classes = {
        "disabled": "LlamaAttention",
        "sdpa": "LlamaSdpaAttention",
        "fa2": "LlamaFlashAttention2",
    }
    for requested_attention in attention_available:
        model = load_infer_model(flash_attn=requested_attention, **INFER_ARGS)
        for module in model.modules():
            if "Attention" in module.__class__.__name__:
                assert module.__class__.__name__ == llama_attention_classes[requested_attention]
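# A minimal manual check, as a sketch (assumes FlashAttention-2 is installed in the environment):
#     model = load_infer_model(flash_attn="fa2", **INFER_ARGS)
#     print({m.__class__.__name__ for m in model.modules() if "Attention" in m.__class__.__name__})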