import os
from typing import Any, Dict, List

import numpy as np
from transformers import CLIPModel, CLIPTokenizer


class EndpointHandler:
    """Text-to-artwork retrieval endpoint.

    Encodes a text query with CLIP and returns the URLs of the stored
    artworks whose precomputed embeddings are most similar to it.
    """

    def __init__(self, path: str = ""):
        # CLIP text encoder and its tokenizer (downloaded on first use).
        self.model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
        # Precomputed artwork data shipped alongside the handler.
        # NOTE(review): the dot product in __call__ is a cosine similarity
        # only if these embeddings are already L2-normalized — confirm at
        # the time they are exported.
        self.artwork_urls = np.load(os.path.join(path, "artwork_urls.npy"), allow_pickle=True)
        self.embeddings = np.load(os.path.join(path, "embeddings.npy"), allow_pickle=True)

    def __call__(self, data: Dict[str, Any]) -> List[str]:
        """Return the URLs of the artworks most similar to the query text.

        Args:
            data: request payload with:
                inputs (str): the text query to encode.
                top_k (int, optional): number of results to return.
                    Defaults to 100, matching the previous hard-coded
                    slice, so existing callers see identical behavior.

        Return:
            A list of artwork URL strings, best match first; will be
            serialized and returned by the serving layer.
        """
        # BUG FIX: the old variable was named ``top_10`` but actually
        # selected 100 results; the count is now an explicit request
        # parameter with the old value as its default.
        top_k = int(data.get("top_k", 100))

        inputs = self.tokenizer(data["inputs"], padding=True, return_tensors="pt")
        text_features = self.model.get_text_features(**inputs)
        text_features = text_features.detach().numpy()

        # Normalize the query embedding so the dot product below acts as a
        # cosine score (assuming the stored embeddings are normalized too).
        query = text_features[0]
        query = query / np.linalg.norm(query)

        scores = self.embeddings @ query
        # Indices of the top_k highest scores, best first.
        best = scores.argsort()[-top_k:][::-1]
        return self.artwork_urls[best].tolist()