import torch.nn as nn
from transformers import SiglipTextModel

from modules.build import LANGUAGE_REGISTRY


@LANGUAGE_REGISTRY.register()
class FGCLIPLanguageEncoder(nn.Module):
    """Language encoder wrapping the pretrained SigLIP text tower."""

    def __init__(self, cfg, weights="google/siglip-base-patch16-224", max_position_embeddings=512):
        super().__init__()
        # Load the pretrained SigLIP text model with an extended maximum sequence
        # length. Overriding max_position_embeddings changes the shape of the
        # position-embedding table, so the mismatched pretrained position
        # embeddings are skipped and re-initialized; ignore_mismatched_sizes=True
        # lets the remaining checkpoint weights load.
        self.model = SiglipTextModel.from_pretrained(
            weights,
            max_position_embeddings=max_position_embeddings,
            ignore_mismatched_sizes=True,
        )

    def forward(self, txt_ids, **kwargs):
        # txt_ids: (B, L) LongTensor of token ids
        outputs = self.model(input_ids=txt_ids).last_hidden_state
        return outputs  # (B, L, hidden_size)
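

# --- Minimal usage sketch (illustrative assumption, not part of the module) ---
# Shows how the encoder might be driven with the companion SigLIP tokenizer.
# The checkpoint name matches the default `weights` argument above; `cfg` is
# not used by the encoder, so None is passed here purely for demonstration.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")
    encoder = FGCLIPLanguageEncoder(cfg=None)

    captions = ["a photo of a cat", "a close-up photo of a striped tabby cat"]
    # SigLIP was trained on fixed-length padded inputs, so pad to max_length.
    batch = tokenizer(captions, padding="max_length", return_tensors="pt")

    feats = encoder(batch["input_ids"])
    print(feats.shape)  # (B, L, hidden_size), e.g. torch.Size([2, 64, 768])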