# SEED_balanced / FAITH / models / FrozenCLIPTextEncoder.py
# (Hugging Face repo-page residue — "Mengieong's picture / Upload 253 files /
#  02e9762 verified" — commented out so this module parses as Python.)
import pdb
import torch
from transformers import CLIPTokenizer, CLIPTextModel
import torch.nn as nn
class AbstractEncoder(nn.Module):
    """Minimal base class for text encoders.

    Subclasses are expected to override :meth:`encode`; the base
    implementation only signals that the method is abstract.
    """

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        """Encode the given input — must be implemented by a subclass."""
        raise NotImplementedError
# Attribute token id -> facial-attribute word used when building prompts.
ATTRIBUTE_MAPPING = {1: 'eye', 2: 'lip', 3: 'hair', 4: 'glasses', 5: 'hat', 6: 'eyebrow'}

# Position within an edit sequence (1-based) -> ordinal word for the prompt.
ORDER_MAPPING = {1: 'first', 2: 'second', 3: 'third', 4: 'last'}
class FrozenCLIPEmbedder(AbstractEncoder):
    """Uses the CLIP transformer encoder for text (from Hugging Face).

    Each integer attribute id in a batch of sequences is mapped to a short
    text prompt (see :meth:`label_mapping`), the prompts are tokenized and
    run through a frozen CLIP text model, and the hidden state of the first
    real token (position 1, right after BOS) is used as the per-attribute
    embedding.  Embeddings at PAD positions (id 8) are zeroed.

    Attribute-id conventions visible in this file:
        0 -> "START", 7 -> "END", 8 -> "PAD", 1..6 -> ATTRIBUTE_MAPPING.
    """

    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=16):
        """Load tokenizer + text model for ``version`` and freeze them.

        Args:
            version: Hugging Face model id of the CLIP checkpoint.
            device: device the token ids are moved to in :meth:`forward`.
            max_length: kept for interface compatibility.
                NOTE(review): currently unused by this class — confirm
                before removing.
        """
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPTextModel.from_pretrained(version)
        self.device = device
        self.max_length = max_length
        self.freeze()

    def freeze(self):
        """Put the text model in eval mode and disable all gradients."""
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        """Encode a batch of attribute-id sequences.

        Args:
            text: integer tensor of shape ``[bs, seq_len]``.

        Returns:
            Tensor of shape ``[bs, seq_len, hidden]`` (hidden = 768 for
            clip-vit-large-patch14); rows at PAD positions are zeroed.
        """
        assert torch.is_tensor(text)
        input_shape = text.shape  # [bs, seq_len]
        prompts = self.label_mapping(text)  # bs * seq_len short prompts
        batch_encoding = self.tokenizer(prompts, truncation=False, return_length=True,
                                        return_overflowing_tokens=False, padding=True,
                                        return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)
        # Hidden state of the first real token (index 1, right after BOS)
        # serves as the prompt embedding.  shape: [bs * seq_len, hidden]
        outputs = self.transformer(input_ids=tokens).last_hidden_state[:, 1, :]
        # Zero the embedding of every PAD (id 8) position.
        pad_indices = (text.contiguous().view(-1) == 8).nonzero(as_tuple=True)[0]
        outputs[pad_indices] = 0
        return outputs.contiguous().view(input_shape[0], input_shape[1], -1)

    def label_mapping(self, batch_sequence):
        """Turn a ``[bs, seq_len]`` id tensor into ``bs * seq_len`` prompts.

        Special ids map to literal "START" / "END" / "PAD"; attribute ids
        1..6 become "<attribute> <order>", where the order is 'last' when
        the next slot in the SAME sequence is PAD, and otherwise comes from
        ORDER_MAPPING keyed by the slot's position.

        NOTE(review): ORDER_MAPPING only has keys 1..4, so this assumes
        seq_len == 5 and that position 0 always holds the START token —
        confirm against the callers before changing sequence length.
        """
        max_sequence_length = batch_sequence.shape[1]
        flat = batch_sequence.contiguous().view(-1).tolist()  # length bs * seq_len
        sentences = []
        for i, attribute in enumerate(flat):
            if attribute == 0:
                sentence = "START"
            elif attribute == 7:
                sentence = "END"
            elif attribute == 8:  # TEST sequence : [SOS, PAD, PAD, PAD]
                sentence = "PAD"
            else:
                pos = i % max_sequence_length  # position within its own sequence
                # BUGFIX: only look ahead for PAD *within the same sequence*.
                # The original checked flat[i + 1] unconditionally, which for
                # the final slot of one sample inspected the first token of
                # the next sample in the flattened batch.
                next_is_pad = (pos + 1 < max_sequence_length
                               and flat[i + 1] == 8)
                order = 'last' if next_is_pad else ORDER_MAPPING[pos]
                sentence = f"{ATTRIBUTE_MAPPING[attribute]} {order}"
            sentences.append(sentence)
        return sentences

    def encode(self, text):
        """Alias for :meth:`forward` (AbstractEncoder interface)."""
        return self(text)
if __name__ == "__main__":
    # Smoke test: encode three attribute-id sequences.  Requires CUDA and
    # network access to download the CLIP checkpoint on first run.
    demo_batch = torch.tensor([[0, 1, 2, 3, 4],
                               [0, 3, 2, 8, 8],
                               [0, 1, 5, 6, 8]]).cuda()
    model = FrozenCLIPEmbedder().cuda()
    model(demo_batch)