File size: 5,706 Bytes
c94c8c9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
import torch.nn as nn
import torch.nn.functional as F
from fvcore.common.registry import Registry
import torch

# Registry for loss modules defined outside this file; plain functions in this
# module are resolved via globals() instead (see Loss.__init__ below).
LOSS_REGISTRY = Registry("loss")

def og3d_loss(data_dict):
    """Single-target 3D grounding loss: cross-entropy over object logits."""
    logits = data_dict["og3d_logits"]
    target = data_dict["tgt_object_id"].squeeze(1)
    return F.cross_entropy(logits, target)


def og3d_multi_loss(data_dict):
    """Multi-target 3D grounding loss: summed BCE-with-logits, normalized by batch size."""
    targets = data_dict["tgt_object_id"].float()
    batch_size = float(targets.shape[0])
    summed = F.binary_cross_entropy_with_logits(
        data_dict["og3d_logits"], targets, reduction="sum"
    )
    return summed / batch_size


def txt_cls_multi_loss(data_dict):
    """Multi-label text classification loss: summed BCE-with-logits, normalized by batch size."""
    targets = data_dict["tgt_object_label"].float()
    batch_size = float(targets.shape[0])
    summed = F.binary_cross_entropy_with_logits(
        data_dict["txt_cls_logits"], targets, reduction='sum'
    )
    return summed / batch_size


def obj_cls_raw_loss(data_dict):
    """Per-object classification CE (raw stage), averaged over valid (unmasked) objects."""
    mask = data_dict["obj_masks"]
    per_object = F.cross_entropy(
        data_dict["obj_cls_raw_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    return (per_object * mask).sum() / mask.sum()


def obj_cls_pre_loss(data_dict):
    """Per-object classification CE (pre stage), averaged over valid (unmasked) objects."""
    mask = data_dict["obj_masks"]
    per_object = F.cross_entropy(
        data_dict["obj_cls_pre_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    return (per_object * mask).sum() / mask.sum()


def obj_cls_post_loss(data_dict):
    """Per-object classification CE (post stage), averaged over valid (unmasked) objects."""
    mask = data_dict["obj_masks"]
    per_object = F.cross_entropy(
        data_dict["obj_cls_post_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    return (per_object * mask).sum() / mask.sum()


def answer_loss(data_dict):
    """QA answer loss: summed BCE-with-logits over answer scores, normalized by batch size."""
    scores = data_dict["answer_scores"]
    summed = F.binary_cross_entropy_with_logits(
        scores, data_dict["answer_label"].float(), reduction='sum'
    )
    return summed / scores.shape[0]


def lm_cls_loss(data_dict):
    """Masked-LM token classification CE; label positions set to -1 are ignored."""
    labels = data_dict["masked_lm_labels"]
    # Flatten a 3-D label tensor to 2-D, keeping the last (sequence) dimension.
    if labels.dim() == 3:
        labels = labels.view(-1, labels.size(-1))
    return F.cross_entropy(
        data_dict["txt_lm_cls_logits"].permute(0, 2, 1), labels, ignore_index=-1
    )


def obj_cls_pre_loss_mask(data_dict):
    """Per-object CE (pre stage) restricted to valid objects whose semantics were masked out."""
    keep = data_dict["obj_masks"] * data_dict["obj_sem_masks"].logical_not()
    per_object = F.cross_entropy(
        data_dict["obj_cls_pre_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    return (per_object * keep).sum() / keep.sum()


def obj_cls_pre_loss_unmask(data_dict):
    """Per-object CE (pre stage) restricted to valid objects whose semantics were kept."""
    keep = data_dict["obj_masks"] * data_dict["obj_sem_masks"]
    per_object = F.cross_entropy(
        data_dict["obj_cls_pre_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    return (per_object * keep).sum() / keep.sum()


def obj_cls_post_loss_mask(data_dict):
    """Per-object CE (post stage) restricted to valid objects whose semantics were masked out."""
    keep = data_dict["obj_masks"] * data_dict["obj_sem_masks"].logical_not()
    per_object = F.cross_entropy(
        data_dict["obj_cls_post_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    return (per_object * keep).sum() / keep.sum()


def obj_cls_post_loss_unmask(data_dict):
    """Per-object CE (post stage) restricted to valid objects whose semantics were kept."""
    keep = data_dict["obj_masks"] * data_dict["obj_sem_masks"]
    per_object = F.cross_entropy(
        data_dict["obj_cls_post_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
    )
    return (per_object * keep).sum() / keep.sum()


def obj_cls_loss(data_dict, smoothing=0.3):
    """Label-smoothed per-object classification CE, averaged over valid (unmasked) objects.

    Args:
        data_dict: batch dict providing "obj_logits", "obj_labels", "obj_masks".
        smoothing: label-smoothing factor passed to cross_entropy (default 0.3).
    """
    mask = data_dict["obj_masks"]
    per_object = F.cross_entropy(
        data_dict["obj_logits"].permute(0, 2, 1),
        data_dict["obj_labels"],
        reduction='none',
        label_smoothing=smoothing,
    )
    return (per_object * mask).sum() / mask.sum()


def mse_loss(data_dict):
    """Mean squared error between predicted and target images."""
    diff = data_dict["pred_images"] - data_dict["target_images"]
    return diff.pow(2).mean()

class Loss(nn.Module):
    """Aggregates the per-task loss terms named in the experiment config.

    Each loss name in ``cfg.model.loss_list`` and ``cfg.model.vis_loss_list``
    is resolved either to a plain function defined in this module (looked up
    via ``globals()``) or, failing that, to a class registered in
    ``LOSS_REGISTRY`` which is instantiated with ``(cfg, accelerator)``.
    """

    def __init__(self, cfg, accelerator):
        # e.g.  refer_loss_v1: ["og3d_loss", "txt_cls_loss", "obj_cls_raw_loss", "obj_cls_pre_loss", "obj_cls_post_loss"]
        #       qa_loss_v1: ["og3d_loss", "txt_cls_loss", "obj_cls_raw_loss", "obj_cls_pre_loss", "obj_cls_post_loss", "answer_loss"]
        #       pretrain_loss_v1: ["lm_cls_loss", "obj_cls_raw_loss", "obj_cls_pre_loss", "obj_cls_post_loss", "obj_cls_pre_loss_mask",
        #                           "obj_cls_pre_loss_unmask", "obj_cls_post_loss_mask", "obj_cls_post_loss_unmask"]
        super().__init__()
        # Union of training losses and visualization-only losses (deduplicated).
        self.all_keys = list(set(cfg.model.vis_loss_list + cfg.model.loss_list))
        # NOTE(review): selected_keys is stored but never read anywhere in this
        # class — forward() sums *every* computed loss (vis_loss_list terms
        # included) into total_loss. Confirm whether visualization-only losses
        # are meant to contribute to the optimized total.
        self.selected_keys = cfg.model.loss_list

        self.loss_fn = {}
        for k in self.all_keys:
            if k in globals().keys():
                # Plain function defined at module level in this file.
                self.loss_fn[k] = globals()[k]
                print(f"Using {k} from loss.globals()")
            else:
                # Registered loss module: instantiate with config + accelerator.
                self.loss_fn[k] = LOSS_REGISTRY.get(k)(cfg, accelerator)
                setattr(self, k, self.loss_fn[k]) # register the loss module, otherwise its parameters will not be the same device as the model
                print(f"Using {k} from Registry {LOSS_REGISTRY._name}")

    def forward(self, data_dict):
        """Compute all configured losses on a batch.

        Args:
            data_dict: batch dict holding model outputs and targets; may be
                mutated (a 'txt_cls_label' entry is added when needed).

        Returns:
            (total_loss, all_losses): scalar sum of every computed term, and a
            dict mapping loss name -> value (plus a "total_loss" entry).
        """
        all_losses = {}

        # Precompute label if needed
        if 'txt_cls_loss' in self.loss_fn and 'txt_cls_label' not in data_dict:
            data_dict['txt_cls_label'] = data_dict["tgt_object_label"].squeeze(1)

        for k, fn in self.loss_fn.items():
            # Compute current loss
            cur_loss = fn(data_dict)

            # A loss callable may return several named sub-terms at once.
            if isinstance(cur_loss, dict):
                all_losses.update(cur_loss)
            else:
                all_losses[k] = cur_loss

        # NOTE(review): this sums every entry in all_losses, including any
        # vis_loss_list-only terms — see the note on selected_keys above.
        total_loss = sum(all_losses.values())
        all_losses["total_loss"] = total_loss

        return total_loss, all_losses