File size: 677 Bytes
c94c8c9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 |
import torch.nn as nn
from transformers import SiglipTextModel
from modules.build import LANGUAGE_REGISTRY
import torch
@LANGUAGE_REGISTRY.register()
class FGCLIPLanguageEncoder(nn.Module):
    """Language encoder wrapping a pretrained SigLIP text transformer.

    Tokenized captions go in; the per-token last hidden states of the
    SigLIP text model come out.
    """

    def __init__(self, cfg, weights="google/siglip-base-patch16-224", max_position_embeddings=512):
        """Load the pretrained SigLIP text model.

        Args:
            cfg: Project config object (kept for registry-construction
                compatibility; not read here).
            weights: HuggingFace model id or local path of the SigLIP
                checkpoint to load.
            max_position_embeddings: Overrides the checkpoint's position
                embedding length so longer captions can be encoded.
        """
        super().__init__()
        self.model = SiglipTextModel.from_pretrained(
            weights, max_position_embeddings=max_position_embeddings
        )

    def forward(self, txt_ids, **kwargs):
        """Encode token ids into per-token features.

        Args:
            txt_ids: (B, L) tensor of token ids — passed straight to the
                model as ``input_ids``.
            **kwargs: Ignored; accepted for interface compatibility.

        Returns:
            Last hidden state of the text model, shape (B, L, D)
            (D is the model's hidden size — confirm against checkpoint).
        """
        # Fix: removed the unused `caption_input = torch.tensor(txt_ids)`
        # line — it was dead code, and torch.tensor() on an existing tensor
        # performs a copy and emits a UserWarning.
        return self.model(input_ids=txt_ids).last_hidden_state
|