rbanfield committed
Commit 8e0a4cd · 1 Parent(s): b926327

"inputs" is required

Files changed (1)
  1. handler.py +4 -2
handler.py CHANGED
@@ -14,8 +14,9 @@ class EndpointHandler():
         self.processor = CLIPProcessor.from_pretrained("rbanfield/clip-vit-large-patch14")
 
     def __call__(self, data):
-        text_input = data.pop("text", None)
-        image_input = data.pop("image", None)
+        inputs = data.pop("inputs", None)
+        text_input = inputs["text"] if "text" in inputs else None
+        image_input = inputs["image"] if "image" in inputs else None
 
         if text_input:
             processor = self.processor(text=text_input, return_tensors="pt", padding=True)
@@ -28,3 +29,4 @@ class EndpointHandler():
             return self.image_model(**processor).image_embeds.tolist()
         else:
             return None
+[rbanfield@atlas clip-vit-large-pa
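
For context, a minimal sketch of the payload shape the updated __call__ expects, assuming the handler is invoked with the parsed JSON request body as a dict; the keys "inputs", "text", and "image" come straight from the diff above, while the sample values and the handler variable are illustrative. Note that when "inputs" is absent, inputs is None and the "text" in inputs check raises a TypeError, which matches the commit message that "inputs" is required.

# Illustrative payload shapes for the updated handler (sample values are made up):
old_payload = {"text": ["a photo of a cat"]}              # read directly before this commit
new_payload = {"inputs": {"text": ["a photo of a cat"]}}  # top-level "inputs" now required

# Image requests follow the same nesting; the expected image encoding is not shown in this diff:
image_payload = {"inputs": {"image": "<image data in whatever format the handler decodes>"}}

# With an EndpointHandler instance (constructor arguments not shown in this diff):
# embeddings = handler(new_payload)   # returns embeddings as a nested list, per the diff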