import torch
import torch.nn as nn
import torch.nn.functional as F

from modules.build import HEADS_REGISTRY
from modules.utils import get_mlp_head


class FC(nn.Module):
    """Linear layer with optional GELU activation and dropout."""

    def __init__(self, in_size, out_size, pdrop=0., use_gelu=True):
        super().__init__()
        self.pdrop = pdrop
        self.use_gelu = use_gelu
        self.linear = nn.Linear(in_size, out_size)
        if use_gelu:
            self.gelu = nn.GELU()
        if pdrop > 0:
            self.dropout = nn.Dropout(pdrop)

    def forward(self, x):
        x = self.linear(x)
        if self.use_gelu:
            x = self.gelu(x)
        if self.pdrop > 0:
            x = self.dropout(x)
        return x


class MLP(nn.Module):
    """Two-layer MLP: FC (with activation/dropout) followed by a linear projection."""

    def __init__(self, in_size, mid_size, out_size, pdrop=0., use_gelu=True):
        super().__init__()
        self.fc = FC(in_size, mid_size, pdrop=pdrop, use_gelu=use_gelu)
        self.linear = nn.Linear(mid_size, out_size)

    def forward(self, x):
        return self.linear(self.fc(x))


class AttFlat(nn.Module):
    """Attention-based flattening: scores each token with a small MLP,
    softmaxes the scores over the sequence, and pools the weighted token
    features into one vector per glimpse before a final linear merge."""

    def __init__(self, hidden_size, flat_mlp_size=512, flat_glimpses=1, flat_out_size=1024, pdrop=0.1):
        super().__init__()
        self.mlp = MLP(
            in_size=hidden_size,
            mid_size=flat_mlp_size,
            out_size=flat_glimpses,
            pdrop=pdrop,
            use_gelu=True
        )
        self.flat_glimpses = flat_glimpses
        self.linear_merge = nn.Linear(
            hidden_size * flat_glimpses,
            flat_out_size
        )

    def forward(self, x, x_mask):
        # x: [B, N, hidden_size]; x_mask: [B, N] bool, True = position to ignore.
        att = self.mlp(x)  # [B, N, flat_glimpses]
        if x_mask is not None:
            att = att.masked_fill(x_mask.unsqueeze(2), -1e9)
        att = F.softmax(att, dim=1)
        att_list = []
        for i in range(self.flat_glimpses):
            att_list.append(
                torch.sum(att[:, :, i: i + 1] * x, dim=1)
            )
        x_atted = torch.cat(att_list, dim=1)  # [B, hidden_size * flat_glimpses]
        x_atted = self.linear_merge(x_atted)  # [B, flat_out_size]
        return x_atted
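
# Usage sketch for AttFlat (hypothetical shapes, illustration only):
#   flat = AttFlat(hidden_size=512)   # defaults: 1 glimpse, flat_out_size=1024
#   pooled = flat(torch.randn(2, 16, 512), x_mask=None)  # -> [2, 1024]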


@HEADS_REGISTRY.register()
class GroundHeadV1(nn.Module):
    """Answer head: pools visual and language token features with AttFlat,
    fuses them by addition + LayerNorm, and classifies over the answer set.

    input_size, sem_cls_size, and detach_all_aux_loss are kept in the
    signature for config compatibility but are unused here."""

    def __init__(self, cfg, input_size=768, hidden_size=768, sem_cls_size=607,
                 dropout=0.3, detach_all_aux_loss=False, num_answers=80):
        super().__init__()
        # Hard-coded feature widths of the incoming embeddings.
        image_embed_dim = 512
        text_embed_dim = 1024
        mlp_size = 256
        glimpse = 1
        flat_out_size = 512

        self.attflat_visual = AttFlat(image_embed_dim, mlp_size, glimpse, flat_out_size, 0.1)
        self.attflat_lang = AttFlat(text_embed_dim, mlp_size, glimpse, flat_out_size, 0.1)
        self.answer_cls = nn.Sequential(
            nn.Linear(flat_out_size, hidden_size),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_size, num_answers)
        )
        self.fusion_norm = nn.LayerNorm(flat_out_size)

    def forward(self, pm_embeds, txt_embeds, txt_masks):
        # pm_embeds: [B, N, 512]; txt_embeds: [B, L, 1024];
        # txt_masks: [B, L] bool, True = padded token to ignore.
        object_feat = self.attflat_visual(pm_embeds, None)
        lang_feat = self.attflat_lang(txt_embeds.float(), txt_masks)
        fuse_feat = self.fusion_norm(lang_feat + object_feat)
        answer_scores = self.answer_cls(fuse_feat)  # [B, num_answers]
        return answer_scores


@HEADS_REGISTRY.register()
class GroundHead(nn.Module):
    """Grounding head: scores each object embedding with an MLP head and
    masks out invalid objects with -inf."""

    def __init__(self, cfg, input_size=768, hidden_size=768, dropout=0.3):
        super().__init__()
        self.og3d_head = get_mlp_head(
            input_size, hidden_size,
            1, dropout=dropout
        )

    def forward(self, obj_embeds, obj_masks=None, **kwargs):
        # obj_embeds: [B, N, input_size]; obj_masks: [B, N] bool, True = valid.
        og3d_logits = self.og3d_head(obj_embeds).squeeze(2)  # [B, N]
        if obj_masks is not None:
            og3d_logits = og3d_logits.masked_fill_(obj_masks.logical_not(), -float('inf'))
        return og3d_logits
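

if __name__ == "__main__":
    # Minimal smoke test, a sketch runnable inside this repo (it needs
    # modules.build / modules.utils). Batch size, token counts, and the
    # random inputs below are illustrative assumptions, not dataset values;
    # cfg is unused by these heads, so None suffices.
    head = GroundHeadV1(cfg=None)
    pm_embeds = torch.randn(2, 8, 512)                 # 8 visual tokens
    txt_embeds = torch.randn(2, 16, 1024)              # 16 text tokens
    txt_masks = torch.zeros(2, 16, dtype=torch.bool)   # no padding
    print(head(pm_embeds, txt_embeds, txt_masks).shape)  # torch.Size([2, 80])

    # GroundHead's output width assumes get_mlp_head maps input_size -> 1
    # as configured above.
    ground = GroundHead(cfg=None)
    obj_embeds = torch.randn(2, 8, 768)
    obj_masks = torch.ones(2, 8, dtype=torch.bool)     # all objects valid
    print(ground(obj_embeds, obj_masks).shape)          # torch.Size([2, 8])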