from typing import Any, Dict

import requests
import torch
from PIL import Image
from transformers import AutoProcessor, AutoTokenizer, CLIPModel

class EndpointHandler:
    def __init__(self, path=""):
        # Load the CLIP model plus its image processor and text tokenizer
        # from the checkpoint directory the endpoint mounts at `path`.
        self.model = CLIPModel.from_pretrained(path)
        self.processor = AutoProcessor.from_pretrained(path)
        self.tokenizer = AutoTokenizer.from_pretrained(path)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        data args:
            inputs (:obj:`str`): the text to embed, or an image URL
            parameters (:obj:`dict`): must contain a "datatype" key set to
                either "image" or "text"
        Return:
            A :obj:`dict` holding the embedding; it will be serialized and returned
        """
        # get inputs
        inputs = data.pop("inputs", data)  # for images, this is a URL
        params = data.pop("parameters", {})

        if params.get("datatype") == "image":
            # download the image and embed it
            image = Image.open(requests.get(inputs, stream=True).raw)
            features = self.embed_image(image)
        elif params.get("datatype") == "text":
            # embed the raw text
            features = self.embed_text(inputs)
        else:
            raise ValueError("parameters['datatype'] must be 'image' or 'text'")

        # return the first (and only) embedding as a plain list
        return {"features": features[0]}

    def embed_text(self, text):
        inputs = self.tokenizer(text, padding=True, return_tensors="pt")
        with torch.no_grad():
            text_features = self.model.get_text_features(**inputs)
        # normalize to unit length so embeddings are comparable via dot product
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # convert the tensor to a JSON-serializable nested list
        return text_features.tolist()

    def embed_image(self, image):
        inputs = self.processor(images=image, return_tensors="pt")
        with torch.no_grad():
            image_features = self.model.get_image_features(**inputs)
        # normalize to unit length so embeddings are comparable via dot product
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        # convert the tensor to a JSON-serializable nested list
        return image_features.tolist()
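

# A minimal local smoke test, a sketch rather than part of the deployed
# handler. It assumes a CLIP checkpoint such as "openai/clip-vit-base-patch32"
# is available and that the sample COCO image URL below is reachable; both are
# illustrative placeholders, not values required by the handler itself.
if __name__ == "__main__":
    handler = EndpointHandler(path="openai/clip-vit-base-patch32")

    # Embed a piece of text.
    text_out = handler({"inputs": "a photo of a cat",
                        "parameters": {"datatype": "text"}})
    print(len(text_out["features"]))  # 512-dim embedding for this checkpoint

    # Embed an image fetched from a URL.
    image_out = handler({"inputs": "http://images.cocodataset.org/val2017/000000039769.jpg",
                         "parameters": {"datatype": "image"}})
    print(len(image_out["features"]))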