# -*- coding: utf-8 -*-
"""Public exports for the attention-module package.

Re-exports the currently maintained implementations. The legacy modules
(ABCAttention, Attention, BasedLinearAttention, DeltaNet,
GatedLinearAttention, HGRN/HGRN2, LinearAttention, MultiScaleRetention,
ReBasedLinearAttention, RWKV6Attention) were removed from the public API;
their commented-out imports have been deleted rather than left as dead code.
"""

from .emla import emla
from .emgla import emgla
from .mask_deltanet import mask_deltanet
from .mask_gdn import mask_gdn

# Declare the public API explicitly, matching the convention the package
# previously used for its legacy export list.
__all__ = [
    'emla',
    'emgla',
    'mask_deltanet',
    'mask_gdn',
]