| import pdb |
| import torch |
| from transformers import CLIPTokenizer, CLIPTextModel |
| import torch.nn as nn |
|
|
|
|
class AbstractEncoder(nn.Module):
    """Minimal interface for text encoders: subclasses implement ``encode``."""

    def __init__(self):
        super().__init__()

    def encode(self, *args, **kwargs):
        # Concrete encoders provide the actual encoding logic.
        raise NotImplementedError
|
|
|
|
# Attribute code (1-6) -> facial attribute name used in the generated prompts.
ATTRIBUTE_MAPPING = dict(
    enumerate(['eye', 'lip', 'hair', 'glasses', 'hat', 'eyebrow'], start=1)
)

# Position slot (1-4) -> ordinal word appended to an attribute prompt.
ORDER_MAPPING = dict(
    enumerate(['first', 'second', 'third', 'last'], start=1)
)
|
|
|
|
class FrozenCLIPEmbedder(AbstractEncoder):
    """Uses the CLIP transformer encoder for text (from Hugging Face).

    Integer attribute sequences are first mapped to short text prompts
    (see ``label_mapping``), then tokenized and encoded with a frozen
    CLIP text model. One embedding vector is produced per sequence token.
    """

    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=16):
        super().__init__()
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        self.transformer = CLIPTextModel.from_pretrained(version)
        self.device = device
        self.max_length = max_length
        # The CLIP weights stay fixed; this module is used as a feature extractor.
        self.freeze()

    def freeze(self):
        """Put the text model in eval mode and disable gradients for all params."""
        self.transformer = self.transformer.eval()
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, text):
        """Embed a batch of integer attribute sequences.

        Args:
            text: integer tensor of shape (batch, seq_len) holding attribute
                codes (0=START, 7=END, 8=PAD, 1-6 = attributes).

        Returns:
            Float tensor of shape (batch, seq_len, hidden_dim); embeddings at
            PAD (code 8) positions are zeroed.
        """
        assert torch.is_tensor(text)
        input_shape = text.shape

        # Every flattened code becomes its own short prompt; pad to the
        # longest prompt so we get a rectangular token tensor.
        batch_encoding = self.tokenizer(self.label_mapping(text), truncation=False, return_length=True,
                                        return_overflowing_tokens=False, padding=True, return_tensors="pt")
        tokens = batch_encoding["input_ids"].to(self.device)

        # Use the hidden state at index 1 — the first content token right
        # after BOS — as the embedding of each prompt.
        outputs = self.transformer(input_ids=tokens).last_hidden_state[:, 1, :]

        # Zero out the embeddings of PAD positions so padding carries no signal.
        indices = (text.contiguous().view(-1) == 8).nonzero(as_tuple=True)[0]
        outputs[indices] = 0

        outputs = outputs.contiguous().view(input_shape[0], input_shape[1], -1)
        return outputs

    def label_mapping(self, batch_sequence):
        """Convert a (batch, seq_len) code tensor into one prompt per code.

        Special codes map to the literal strings "START" (0), "END" (7) and
        "PAD" (8); attribute codes 1-6 map to "<attribute> <order>", where
        the order is 'last' if the next code in the SAME sequence is PAD,
        otherwise the ordinal for the position within the sequence.

        Returns:
            Flat list of batch*seq_len prompt strings, row-major.
        """
        max_sequence_length = batch_sequence.shape[1]
        batch_sequence = batch_sequence.contiguous().view(-1).tolist()

        sentences = []
        for i, attribute in enumerate(batch_sequence):
            if attribute == 0:
                sentence = "START"
            elif attribute == 7:
                sentence = "END"
            elif attribute == 8:
                sentence = "PAD"
            else:
                # BUGFIX: the look-ahead must stay inside the current sequence.
                # The old check (`i < len(batch_sequence) - 1`) operated on the
                # flattened batch and could read the first token of the *next*
                # sequence; `(i + 1) % max_sequence_length != 0` guarantees the
                # neighbor belongs to the same row.
                if (i + 1) % max_sequence_length != 0 and batch_sequence[i + 1] == 8:
                    order = 'last'
                else:
                    # Position within the row (ORDER_MAPPING keys are 1-4);
                    # raises KeyError for position 0 or >4 — presumably rows
                    # always start with START and hold at most 4 attributes.
                    # TODO(review): confirm against callers.
                    order = ORDER_MAPPING[i % max_sequence_length]

                sentence = f"{ATTRIBUTE_MAPPING[attribute]} {order}"
            sentences.append(sentence)

        return sentences

    def encode(self, text):
        """Alias for ``forward``, satisfying the AbstractEncoder interface."""
        return self(text)
| |
|
|
|
|
if __name__ == "__main__":
    # Smoke test: embed a small batch of attribute sequences on the GPU.
    embedder = FrozenCLIPEmbedder().cuda()
    sample = torch.tensor([[0, 1, 2, 3, 4], [0, 3, 2, 8, 8], [0, 1, 5, 6, 8]]).cuda()
    embedder(sample)