aayushgs committed on
Commit
39d49ea
·
verified ·
1 Parent(s): 3d9ad3b

update handler

Browse files
Files changed (1) hide show
  1. handler.py +22 -28
handler.py CHANGED
@@ -1,33 +1,27 @@
1
- import torch
2
  from PIL import Image
3
- import io
4
- from transformers import CLIPProcessor, CLIPModel
 
5
 
6
class EndpointHandler:
    """CLIP-based handler: scores one image against one text prompt."""

    def __init__(self, model_dir):
        # Load the CLIP model and its paired processor from the model directory.
        self.model = CLIPModel.from_pretrained(model_dir)
        self.processor = CLIPProcessor.from_pretrained(model_dir)

    def preprocess(self, data):
        """Turn raw request data (image bytes + text) into model-ready tensors."""
        raw_image = data['image']
        prompt = data['text']

        # Raw bytes -> PIL image the processor can consume.
        pil_image = Image.open(io.BytesIO(raw_image))

        # The processor batches the single prompt and image into "pt" tensors.
        return self.processor(
            text=[prompt], images=pil_image, return_tensors="pt", padding=True
        )

    def inference(self, inputs):
        """Run a forward pass of CLIP on the prepared inputs."""
        return self.model(**inputs)

    def postprocess(self, outputs):
        """Convert image-text similarity logits into a flat probability list."""
        # logits_per_image holds the image-text similarity scores.
        similarity = outputs.logits_per_image
        return similarity.softmax(dim=1).tolist()[0]
 
1
+ from typing import Dict, List, Any
2
  from PIL import Image
3
+ from io import BytesIO
4
+ from transformers import pipeline
5
+ import base64
6
 
 
 
 
 
 
7
 
8
class EndpointHandler:
    """Inference-endpoint handler wrapping a zero-shot image-classification pipeline."""

    def __init__(self, path=""):
        # Load the zero-shot image-classification pipeline from the model directory.
        self.pipeline = pipeline("zero-shot-image-classification", model=path)

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Classify one base64-encoded image against caller-supplied labels.

        Args:
            data: request payload; either the payload itself or its ``"inputs"``
                entry must be a dict with:
                  image (str): base64-encoded image bytes
                  candidates (list): candidate class labels (the historical
                    misspelling ``"candiates"`` is still accepted)

        Returns:
            A list of dicts like ``{"label": "XXX", "score": 0.82}``.
        """
        # Hugging Face endpoints may nest the payload under "inputs";
        # fall back to the top-level dict when they don't.
        inputs = data.pop("inputs", data)

        # Decode the base64 payload into a PIL image.
        image = Image.open(BytesIO(base64.b64decode(inputs["image"])))

        # Accept the correctly spelled key, falling back to the legacy typo
        # "candiates" so existing clients keep working.
        labels = inputs.get("candidates", inputs.get("candiates"))

        # Run prediction for the single image with the provided candidate labels;
        # the pipeline returns one result list per image, so unwrap the first.
        prediction = self.pipeline(images=[image], candidate_labels=labels)
        return prediction[0]