import os

from torch import Tensor, nn
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
class HFEmbedder(nn.Module):
    def __init__(self, version: str, max_length: int, is_clip: bool, **hf_kwargs):
        super().__init__()
        self.is_clip = is_clip
        self.max_length = max_length
        # CLIP returns a pooled sentence embedding; T5 returns per-token hidden states.
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"

        if self.is_clip:
            self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
        else:
            self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)

        # The text encoder is frozen and used for inference only.
        self.hf_module = self.hf_module.eval().requires_grad_(False)
    def forward(self, text: list[str]) -> Tensor:
        batch_encoding = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            return_length=False,
            return_overflowing_tokens=False,
            padding="max_length",
            return_tensors="pt",
        )
        flag = "clip" if self.is_clip else "t5"
        print(f"forward {flag}")
        input_ids = batch_encoding["input_ids"]
        print(f"input_ids shape: {input_ids.shape}, max_length: {self.max_length}")  # Debug
        assert input_ids.shape[1] == self.max_length, (
            f"Sequence length {input_ids.shape[1]} does not match max_length {self.max_length}"
        )
        print(input_ids)
        print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}")  # Debug
        print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}")  # Debug
print(f"self.tokenizer.vocab_size: {self.tokenizer.vocab_size}") # Debug
print(f"self.hf_module.config.vocab_size: {self.hf_module.config.vocab_size}") # Debug
        outputs = self.hf_module(
            input_ids=input_ids.to(self.hf_module.device),
            attention_mask=batch_encoding["attention_mask"].to(self.hf_module.device),
            output_hidden_states=False,
        )
        return outputs[self.output_key]
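

# Minimal usage sketch. The checkpoint names and dtypes below are illustrative
# assumptions, not taken from this file.
if __name__ == "__main__":
    import torch

    # CLIP text encoder: 77-token context, returns a pooled sentence embedding.
    clip = HFEmbedder(
        "openai/clip-vit-large-patch14", max_length=77, is_clip=True, torch_dtype=torch.float32
    )
    # T5 encoder: longer context, returns per-token hidden states.
    # t5 = HFEmbedder("google/t5-v1_1-xxl", max_length=512, is_clip=False, torch_dtype=torch.bfloat16)

    emb = clip(["a photo of a cat"])
    print(emb.shape)  # pooled output: (batch_size, hidden_dim)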