File size: 4,662 Bytes
8aa674c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import torch.nn as nn
import torch.nn.functional as F
from .actionformer_proj import get_sinusoid_encoding
from ..builder import PROJECTIONS
from ..bricks import ConvModule


@PROJECTIONS.register_module()
class ConvSingleProj(nn.Module):
    """Single-scale projection head built from a stack of stride-1 3x3 convs.

    Maps features from ``in_channels`` to ``out_channels`` while keeping the
    temporal resolution unchanged; the first conv handles the channel change,
    the rest operate at ``out_channels``.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        num_convs,
        conv_cfg=None,
        norm_cfg=None,
        act_cfg=dict(type="relu"),
        drop_out=None,
    ):
        super().__init__()
        assert num_convs > 0
        # Optional channel-wise dropout applied once before the conv stack.
        self.drop_out = None if drop_out is None else nn.Dropout1d(p=drop_out)

        layers = []
        for idx in range(num_convs):
            src_channels = out_channels if idx > 0 else in_channels
            layers.append(
                ConvModule(
                    src_channels,
                    out_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                )
            )
        self.convs = nn.ModuleList(layers)

    def forward(self, x, mask):
        """Project the input features.

        Args:
            x: features of shape [B, C, T].
            mask: valid-position mask of shape [B, T].

        Returns:
            Tuple of projected features ([B, out_channels, T]) and the mask
            as propagated by the ConvModules.
        """
        if self.drop_out is not None:
            x = self.drop_out(x)

        for layer in self.convs:
            x, mask = layer(x, mask)
        return x, mask


@PROJECTIONS.register_module()
class ConvPyramidProj(nn.Module):
    """Projection that builds a multi-level temporal feature pyramid.

    A stem of stride-1 convs first maps ``in_channels`` to ``out_channels``;
    an optional sinusoidal position embedding is then added, and the sequence
    is repeatedly max-pool downsampled (stride 2), with one conv per pyramid
    level. Produces ``arch[1] + 1`` levels of features and masks.

    Args:
        in_channels: channel dim of the input features.
        out_channels: channel dim of every pyramid level.
        arch: (number of stem convs, number of downsampling levels).
        conv_cfg: config forwarded to each ConvModule.
        norm_cfg: normalization config forwarded to each ConvModule.
        drop_out: Dropout1d probability applied to the input (0 disables).
        drop_path: accepted for config compatibility; currently unused here.
        use_abs_pe: if True, add an absolute sinusoidal position embedding.
        max_seq_len: length of the position-embedding table; must be > 0
            when ``use_abs_pe`` is True.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        arch=(2, 5),  # (stem convs, downsample levels)
        conv_cfg=None,
        norm_cfg=dict(type="LN"),
        drop_out=0.0,
        drop_path=0.0,
        use_abs_pe=False,
        max_seq_len=-1,
    ):
        super().__init__()

        assert len(arch) == 2
        assert arch[1] > 0
        self.arch = arch
        self.use_abs_pe = use_abs_pe
        self.max_seq_len = max_seq_len

        # Fail fast: with the default max_seq_len=-1 the PE table would be
        # empty and only blow up later inside forward (F.interpolate on a
        # zero-length buffer at eval time).
        if self.use_abs_pe and self.max_seq_len <= 0:
            raise ValueError("max_seq_len must be > 0 when use_abs_pe=True")

        self.drop_out = nn.Dropout1d(p=drop_out) if drop_out > 0 else None

        # projection convs without downsampling
        self.stem_convs = nn.ModuleList()
        for i in range(arch[0]):
            self.stem_convs.append(
                ConvModule(
                    in_channels if i == 0 else out_channels,
                    out_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=dict(type="relu"),
                )
            )

        # position embedding (1, C, T), rescaled by 1/sqrt(n_embed)
        if self.use_abs_pe:
            pos_embed = get_sinusoid_encoding(self.max_seq_len, out_channels) / (out_channels**0.5)
            self.register_buffer("pos_embed", pos_embed, persistent=False)

        # downsampling for pyramid feature
        self.downsampling = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        # convs between each level (one for the full-resolution level plus
        # one per downsampled level)
        self.pyramid_convs = nn.ModuleList()
        for _ in range(arch[1] + 1):
            self.pyramid_convs.append(
                ConvModule(
                    out_channels,
                    out_channels,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=dict(type="relu"),
                )
            )

    def forward(self, x, mask):
        """Build the feature pyramid.

        Args:
            x: features of shape [B, C, T].
            mask: valid-position mask of shape [B, T].

        Returns:
            Tuple of two lists, each of length ``arch[1] + 1``: per-level
            features [B, out_channels, T_l] and per-level boolean masks,
            with T_l halving at each level.
        """
        if self.drop_out is not None:
            x = self.drop_out(x)

        # stem convs without downsampling
        for conv in self.stem_convs:
            x, mask = conv(x, mask)

        # add position embedding (masked so padded positions stay zero)
        if self.use_abs_pe:
            T = x.shape[-1]
            if self.training:
                # Training uses the fixed table; raise rather than assert so
                # the check survives python -O.
                if T > self.max_seq_len:
                    raise RuntimeError("Reached max length.")
                pe = self.pos_embed
            elif T >= self.max_seq_len:
                # Longer than the table at inference: resample it to fit.
                pe = F.interpolate(self.pos_embed, T, mode="linear", align_corners=False)
            else:
                pe = self.pos_embed
            x = x + pe[:, :, :T] * mask.unsqueeze(1).to(x.dtype)

        # downsampling and saving to output
        out, out_mask = [], []
        for level in range(self.arch[1] + 1):
            if level > 0:
                # MaxPool1d has no bool kernel; round-trip through float and
                # re-zero the padded positions of x after pooling.
                mask = self.downsampling(mask.float()).bool()
                x = self.downsampling(x) * mask.unsqueeze(1).to(x.dtype)

            x, mask = self.pyramid_convs[level](x, mask)
            out.append(x)
            out_mask.append(mask)
        return out, out_mask