| from sentence_transformers import SentenceTransformer | |
| import torch.nn.functional as F | |
| import torch | |
class Load_EmbeddingModels:
    """Holds a SentenceTransformer checkpoint and produces image embeddings.

    The model is loaded once at construction time and moved to GPU when
    torch reports one is available, otherwise it stays on CPU.
    """

    def __init__(self, model_name='jinaai/jina-clip-v2'):
        # Prefer CUDA when torch can see a device; fall back to CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.img_model_ID = model_name
        self.img_model = self.get_cp_img_model_info(self.img_model_ID)

    def get_cp_img_model_info(self, model_name):
        """Instantiate the checkpoint and move it onto ``self.device``.

        NOTE(review): ``trust_remote_code=True`` executes code shipped with
        the model repository — only use with checkpoints you trust.
        """
        print('Loading SentenceTransformer model')
        loaded = SentenceTransformer(model_name, trust_remote_code=True)
        return loaded.to(self.device)

    def get_single_image_embedding_cp_im(self, my_image):
        """Encode one image and return its embedding as a plain Python list.

        ``normalize_embeddings=True`` asks the encoder for unit-normalised
        vectors, so downstream cosine similarity reduces to a dot product.
        """
        vector = self.img_model.encode(my_image, normalize_embeddings=True)
        return vector.tolist()
class Get_EmbeddingModels:
    """Thin facade over ``Load_EmbeddingModels`` exposing one embedding call."""

    def __init__(self, model_name='jinaai/jina-clip-v2'):
        # Eagerly construct the underlying loader (which loads the model).
        self.embed_model = Load_EmbeddingModels(model_name)

    def get_dense_embd(self, img):
        """Return the dense embedding for ``img`` as a Python list."""
        return self.embed_model.get_single_image_embedding_cp_im(img)