from torch import Tensor, nn
from transformers import (
    CLIPTextModel,
    CLIPTokenizer,
    T5EncoderModel,
    T5Tokenizer,
)


class HFEmbedder(nn.Module):
    """Thin wrapper around a frozen Hugging Face text encoder (CLIP or T5)."""

    def __init__(self, version: str, max_length: int, is_clip: bool, **hf_kwargs):
        super().__init__()
        self.is_clip = is_clip
        self.max_length = max_length
        # CLIP conditioning uses the pooled sentence embedding; T5 uses the
        # full per-token hidden states.
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"

        if self.is_clip:
            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
        else:
            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)

        # Inference only: freeze the weights and switch off dropout.
        self.hf_module = self.hf_module.eval().requires_grad_(False)
    def forward(self, text: list[str]) -> Tensor:
        # Tokenize with fixed-length padding so every batch has shape
        # (batch, max_length).
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )

        # Debug output: report which encoder ran and check that padding
        # produced the expected sequence length.
        flag = "clip" if self.is_clip else "t5"
        print(f"forward {flag}")
        input_ids = batch_encoding["input_ids"]
        print(f"input_ids shape: {input_ids.shape}, max_length: {self.max_length}")
        assert input_ids.shape[1] == self.max_length, (
            f"Sequence length {input_ids.shape[1]} does not match max_length {self.max_length}"
        )
        print(input_ids)

        # The tokenizer vocab and the model's embedding table can disagree
        # (e.g. a padded embedding matrix); print both for comparison.
        print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}")
        print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}")
| print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}") |
| print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}") |
|
|
        outputs = self.hf_module(
            input_ids=input_ids.to(self.hf_module.device),
            attention_mask=batch_encoding["attention_mask"].to(self.hf_module.device),
            output_hidden_states=False,
        )
        return outputs[self.output_key]
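

# A minimal sanity check matching the vocab_size debug prints in forward():
# verify that every token id is addressable by the model's embedding table.
# This helper is a sketch added for illustration; it is not part of the
# original module.
def check_ids_in_vocab(input_ids: Tensor, vocab_size: int) -> None:
    max_id = int(input_ids.max())
    assert max_id < vocab_size, (
        f"token id {max_id} is out of range for vocab_size {vocab_size}"
    )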


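# Usage sketch (illustrative, not from the original module). The checkpoint
# names and max_length values below are assumptions: CLIP-L with 77 tokens and
# T5 v1.1 XXL with 256 tokens are a common pairing for this kind of dual text
# encoder, but substitute whatever your pipeline actually loads.
if __name__ == "__main__":
    clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77, is_clip=True)
    t5 = HFEmbedder("google/t5-v1_1-xxl", max_length=256, is_clip=False)

    prompts = ["a photo of a cat"]
    pooled = clip(prompts)   # pooled embedding, shape (batch, hidden)
    tokens = t5(prompts)     # per-token states, shape (batch, max_length, hidden)
    print(pooled.shape, tokens.shape)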