# -*- coding: utf-8 -*-
# Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
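
# NOTE: the upstream fla layer imports below are kept for reference but
# disabled; this package currently exposes only the custom emdeltanet layer.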
# from .abc import ABCAttention
# from .attn import Attention
# from .based import BasedLinearAttention
# from .bitattn import BitAttention
# from .delta_net import DeltaNet
# from .forgetting_attn import ForgettingAttention
# from .gated_deltanet import GatedDeltaNet
# from .gated_deltaproduct import GatedDeltaProduct
# from .gla import GatedLinearAttention
# from .gsa import GatedSlotAttention
# from .hgrn import HGRNAttention
# from .hgrn2 import HGRN2Attention
# from .lightnet import LightNetAttention
# from .linear_attn import LinearAttention
# from .mamba import Mamba
# from .mamba2 import Mamba2
# from .multiscale_retention import MultiScaleRetention
# from .nsa import NativeSparseAttention
# from .path_attn import PaTHAttention
# from .rebased import ReBasedLinearAttention
# from .rwkv6 import RWKV6Attention
# from .rwkv7 import RWKV7Attention

# __all__ = [
#     'ABCAttention',
#     'Attention',
#     'BasedLinearAttention',
#     'BitAttention',
#     'DeltaNet',
#     'ForgettingAttention',
#     'GatedDeltaNet',
#     'GatedDeltaProduct',
#     'GatedLinearAttention',
#     'GatedSlotAttention',
#     'HGRNAttention',
#     'HGRN2Attention',
#     'LightNetAttention',
#     'LinearAttention',
#     'Mamba',
#     'Mamba2',
#     'MultiScaleRetention',
#     'NativeSparseAttention',
#     'ReBasedLinearAttention',
#     'RWKV6Attention',
#     'RWKV7Attention',
#     'PaTHAttention'
# ]
from .emdeltanet import emdeltanet
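
# Explicit export list for the one active import, following the (commented)
# __all__ convention above; 'emdeltanet' is assumed to be the public name
# defined in the local .emdeltanet module.
__all__ = ['emdeltanet']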