import copy
import torch
import torch.nn as nn
from ...builder import TRANSFORMERS
from ..layers import BaseTransformerLayer, MultiheadAttention, FFN


@TRANSFORMERS.register_module()
class DETRDecoder(nn.Module):
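    """DETR Transformer decoder.

    Stacks ``num_layers`` identical post-norm decoder layers (self-attention,
    cross-attention, FFN) and, when ``return_intermediate`` is True, returns
    the output of every layer stacked along a new leading dimension so that
    auxiliary losses can be attached per layer.
    """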
    def __init__(
        self,
        embed_dim=256,
        num_heads=8,
        attn_dropout=0.1,
        ffn_dim=2048,
        ffn_dropout=0.1,
        num_layers=6,
        post_norm=True,
        batch_first=True,
        return_intermediate=True,
    ):
        super().__init__()

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.attn_dropout = attn_dropout
        self.ffn_dim = ffn_dim
        self.ffn_dropout = ffn_dropout
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.return_intermediate = return_intermediate

        self._init_transformer_layers()

        if post_norm:
            self.post_norm_layer = nn.LayerNorm(self.embed_dim)
        else:
            self.post_norm_layer = None

    def _init_transformer_layers(self):
        # Build a single post-norm decoder layer:
        # self-attention -> norm -> cross-attention -> norm -> FFN -> norm.
        transformer_layer = BaseTransformerLayer(
            attn=MultiheadAttention(
                embed_dim=self.embed_dim,
                num_heads=self.num_heads,
                attn_drop=self.attn_dropout,
                batch_first=self.batch_first,
            ),
            ffn=FFN(
                embed_dim=self.embed_dim,
                ffn_dim=self.ffn_dim,
                ffn_drop=self.ffn_dropout,
            ),
            norm=nn.LayerNorm(
                normalized_shape=self.embed_dim,
            ),
            operation_order=("self_attn", "norm", "cross_attn", "norm", "ffn", "norm"),
        )
        # Deep-copy the template so each layer owns independent parameters.
        self.layers = nn.ModuleList([copy.deepcopy(transformer_layer) for _ in range(self.num_layers)])

    def forward(
        self,
        query,
        key,
        value,
        query_pos=None,
        key_pos=None,
        attn_masks=None,
        query_key_padding_mask=None,
        key_padding_mask=None,
        **kwargs,
    ):
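        """Pass the object queries through every decoder layer.

        ``query`` holds the object queries, while ``key``/``value`` carry the
        encoder memory; positional embeddings and masks are forwarded to each
        ``BaseTransformerLayer``.
        """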
        if not self.return_intermediate:
            for layer in self.layers:
                query = layer(
                    query,
                    key,
                    value,
                    query_pos=query_pos,
                    key_pos=key_pos,
                    attn_masks=attn_masks,
                    query_key_padding_mask=query_key_padding_mask,
                    key_padding_mask=key_padding_mask,
                    **kwargs,
                )

            if self.post_norm_layer is not None:
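                # Unsqueeze a leading layer dimension so the shape matches the
                # stacked intermediate outputs returned below.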
                query = self.post_norm_layer(query)[None]
            return query

        # return_intermediate: collect every layer's output for auxiliary losses.
        intermediate = []
        for layer in self.layers:
            query = layer(
                query,
                key,
                value,
                query_pos=query_pos,
                key_pos=key_pos,
                attn_masks=attn_masks,
                query_key_padding_mask=query_key_padding_mask,
                key_padding_mask=key_padding_mask,
                **kwargs,
            )

            if self.post_norm_layer is not None:
                intermediate.append(self.post_norm_layer(query))
            else:
                intermediate.append(query)

        return torch.stack(intermediate)
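

# Usage sketch (illustrative only): how this decoder might be called once the
# surrounding registry/config machinery has built it. The tensor shapes below
# are assumptions for batch_first=True, i.e. (batch, num_queries, embed_dim)
# queries against (batch, num_tokens, embed_dim) encoder memory; the real
# shapes come from the detector that owns this decoder.
#
#   decoder = DETRDecoder(embed_dim=256, num_heads=8, num_layers=6)
#   queries = torch.zeros(2, 100, 256)        # object queries (content part)
#   memory = torch.randn(2, 1350, 256)        # flattened encoder features
#   out = decoder(
#       queries, memory, memory,
#       query_pos=torch.randn(2, 100, 256),   # query positional embeddings
#       key_pos=torch.randn(2, 1350, 256),    # spatial positional encodings
#   )
#   # out.shape == (num_layers, batch, num_queries, embed_dim) when
#   # return_intermediate=True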