from typing import Any, Dict

import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoTokenizer, CLIPModel


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the CLIP model plus its image processor and text tokenizer
        # from the checkpoint the endpoint mounts at `path`.
        self.model = CLIPModel.from_pretrained(path)
        self.processor = AutoProcessor.from_pretrained(path)
        self.tokenizer = AutoTokenizer.from_pretrained(path)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        data args:
            inputs (:obj:`str`): the text to embed, or an image URL.
            parameters (:obj:`dict`): must contain a "datatype" key set to
                either "image" or "text".
        Return:
            A :obj:`dict` with a "features" key that will be serialized
            and returned to the client.
        """
        # get inputs and request parameters
        inputs = data.pop("inputs", data)
        params = data.pop("parameters", {})
        datatype = params.get("datatype")

        if datatype == "image":
            # for images, `inputs` is a URL: download and decode it
            image = Image.open(requests.get(inputs, stream=True, timeout=10).raw)
            features = self.embed_image(image)
        elif datatype == "text":
            features = self.embed_text(inputs)
        else:
            raise ValueError(
                f"Unsupported datatype: {datatype!r}; expected 'image' or 'text'."
            )

        # return the embedding of the (single) input
        return {"features": features[0]}

    def embed_text(self, text):
        inputs = self.tokenizer(text, padding=True, return_tensors="pt")
        with torch.inference_mode():
            text_features = self.model.get_text_features(**inputs)
        # L2-normalize so embeddings are comparable via cosine similarity
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # convert the tensor to nested Python lists for JSON serialization
        return text_features.tolist()

    def embed_image(self, image):
        inputs = self.processor(images=image, return_tensors="pt")
        with torch.inference_mode():
            image_features = self.model.get_image_features(**inputs)
        # L2-normalize so embeddings are comparable via cosine similarity
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        # convert the tensor to nested Python lists for JSON serialization
        return image_features.tolist()
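

# --- Usage sketch (illustrative, not part of the deployed handler) ---
# A minimal local smoke test, assuming the handler can load a CLIP checkpoint
# by hub id. The model id "openai/clip-vit-base-patch32" and the image URL
# below are placeholders; substitute your own checkpoint and inputs.
if __name__ == "__main__":
    handler = EndpointHandler(path="openai/clip-vit-base-patch32")

    # embed a text query
    text_out = handler({
        "inputs": "a photo of a cat",
        "parameters": {"datatype": "text"},
    })
    print(len(text_out["features"]))  # embedding dimension, e.g. 512

    # embed an image fetched from a URL (placeholder URL)
    image_out = handler({
        "inputs": "http://images.cocodataset.org/val2017/000000039769.jpg",
        "parameters": {"datatype": "image"},
    })
    print(len(image_out["features"]))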