File size: 3,432 Bytes
02e9762
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import pdb
import torch
from transformers import CLIPTokenizer, CLIPTextModel
import torch.nn as nn


class AbstractEncoder(nn.Module):
    """Abstract base class for encoder modules.

    Concrete subclasses are expected to override :meth:`encode`.
    """

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        """Encode the given inputs; must be implemented by subclasses."""
        raise NotImplementedError


# Maps attribute label ids to the facial-attribute word inserted into the
# text phrase fed to CLIP.  Ids 0, 7 and 8 are not listed here because they
# are reserved for the special START / END / PAD labels handled separately
# in FrozenCLIPEmbedder.label_mapping.
ATTRIBUTE_MAPPING = {
    1: 'eye',
    2: 'lip',
    3: 'hair',
    4: 'glasses',
    5: 'hat',
    6: 'eyebrow'
}

# Maps a position within a sample (index modulo the sequence length, which
# is >= 1 for non-special labels since position 0 carries START) to the
# ordinal word describing when that attribute is edited.
ORDER_MAPPING = {
    1: 'first',
    2: 'second',
    3: 'third',
    4: 'last'
}


class FrozenCLIPEmbedder(AbstractEncoder):
    """Embed integer attribute-label sequences with a frozen CLIP text encoder.

    Each label id in the input tensor is mapped to a short text phrase
    (see :meth:`label_mapping`), the phrases are tokenized and encoded by
    the CLIP text transformer, and the hidden state of the token at
    position 1 (the first real token after BOS) is used as that label's
    embedding.  Embeddings at PAD positions (label id 8) are zeroed.
    """

    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=16):
        """Load tokenizer and text model `version` and freeze all weights.

        Args:
            version: Hugging Face model id of the CLIP checkpoint.
            device: device the token ids are moved to in forward().
            max_length: kept for interface compatibility.
                NOTE(review): currently unused — the tokenizer pads
                dynamically with ``padding=True`` and no max length.
        """
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPTextModel.from_pretrained(version)
        self.device = device
        self.max_length = max_length
        self.freeze()

    def freeze(self):
        """Switch the transformer to eval mode and disable all gradients."""
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        """Embed a batch of label sequences.

        Args:
            text: integer tensor of shape [bs, seq_len] of label ids
                (0=START, 7=END, 8=PAD, 1..6 = attributes).

        Returns:
            Float tensor of shape [bs, seq_len, hidden] where PAD
            positions are zero vectors.

        Raises:
            TypeError: if `text` is not a tensor.
        """
        # Explicit check instead of `assert`: asserts are stripped under -O.
        if not torch.is_tensor(text):
            raise TypeError(f"expected a tensor of label ids, got {type(text).__name__}")
        input_shape = text.shape    # [bs, seq_len]
        batch_encoding = self.tokenizer(self.label_mapping(text), truncation=False, return_length=True,
                                        return_overflowing_tokens=False, padding=True, return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)
        # Hidden state of the token right after BOS serves as the phrase
        # embedding; shape [bs * seq_len, 768].
        outputs = self.transformer(input_ids=tokens).last_hidden_state[:, 1, :]

        # Zero out embeddings at PAD positions (label id 8).
        indices = (text.contiguous().view(-1) == 8).nonzero(as_tuple=True)[0]
        outputs[indices] = 0

        outputs = outputs.contiguous().view(input_shape[0], input_shape[1], -1)
        return outputs

    def label_mapping(self, batch_sequence):
        """Convert a [bs, seq_len] tensor of label ids into a flat list of
        bs * seq_len text phrases, one per position.

        Special ids 0 / 7 / 8 become "START" / "END" / "PAD"; other ids
        become "<attribute> <order>", where the order is 'last' when the
        next position *within the same sample* is PAD, otherwise the
        ordinal for the position (ORDER_MAPPING).
        """
        max_sequence_length = batch_sequence.shape[1]

        flat = batch_sequence.contiguous().view(-1).tolist()    # length bs * seq_len

        sentences = []
        for i, attribute in enumerate(flat):
            if attribute == 0:
                sentence = "START"
            elif attribute == 7:
                sentence = "END"
            elif attribute == 8:    # TEST sequence : [SOS, PAD, PAD, PAD]
                sentence = "PAD"
            else:
                # Only look ahead within the same sample.  The modulo guard
                # fixes a latent bug: on the flattened list, the last
                # position of one sample used to peek at the first position
                # of the *next* sample.
                next_in_sample = (i + 1) % max_sequence_length != 0
                if next_in_sample and i + 1 < len(flat) and flat[i + 1] == 8:
                    order = 'last'
                else:
                    # Position 0 of a sample is expected to carry START
                    # (id 0), so i % max_sequence_length is >= 1 here;
                    # otherwise ORDER_MAPPING (keys 1..4) would KeyError.
                    order = ORDER_MAPPING[i % max_sequence_length]
                sentence = f"{ATTRIBUTE_MAPPING[attribute]} {order}"
            sentences.append(sentence)

        return sentences

    def encode(self, text):
        """Alias for forward()."""
        return self(text)
    


if __name__ == "__main__":
    # Smoke test: embed three label sequences on the GPU.
    embedder = FrozenCLIPEmbedder().cuda()
    labels = torch.tensor([[0, 1, 2, 3, 4], [0, 3, 2, 8, 8], [0, 1, 5, 6, 8]])
    embedder(labels.cuda())