"inputs" is required
Browse files- handler.py +4 -2
handler.py
CHANGED
|
@@ -14,8 +14,9 @@ class EndpointHandler():
         self.processor = CLIPProcessor.from_pretrained("rbanfield/clip-vit-large-patch14")
 
     def __call__(self, data):
-        text_input = data.pop("text", None)
-        image_input = data.pop("image", None)
+        inputs = data.pop("inputs", None)
+        text_input = inputs["text"] if "text" in inputs else None
+        image_input = inputs["image"] if "image" in inputs else None
 
         if text_input:
             processor = self.processor(text=text_input, return_tensors="pt", padding=True)
@@ -28,3 +29,4 @@ class EndpointHandler():
             return self.image_model(**processor).image_embeds.tolist()
         else:
             return None
+
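For context, a minimal sketch of the complete handler.py after this commit. Only the lines visible in the diff are confirmed; the imports, the text branch, the base64 decoding of the image payload, and the text_model attribute are assumptions following the common Hugging Face Inference Endpoints custom-handler pattern.

import base64
from io import BytesIO

from PIL import Image
from transformers import (
    CLIPProcessor,
    CLIPTextModelWithProjection,
    CLIPVisionModelWithProjection,
)


class EndpointHandler():
    def __init__(self, path=""):
        # Attribute names other than `processor` and `image_model` are assumed.
        self.text_model = CLIPTextModelWithProjection.from_pretrained("rbanfield/clip-vit-large-patch14")
        self.image_model = CLIPVisionModelWithProjection.from_pretrained("rbanfield/clip-vit-large-patch14")
        self.processor = CLIPProcessor.from_pretrained("rbanfield/clip-vit-large-patch14")

    def __call__(self, data):
        # After this commit the payload must nest everything under "inputs".
        inputs = data.pop("inputs", None)
        text_input = inputs["text"] if "text" in inputs else None
        image_input = inputs["image"] if "image" in inputs else None

        if text_input:
            processor = self.processor(text=text_input, return_tensors="pt", padding=True)
            return self.text_model(**processor).text_embeds.tolist()
        elif image_input:
            # Assumed: the image arrives base64-encoded in the JSON payload.
            image = Image.open(BytesIO(base64.b64decode(image_input)))
            processor = self.processor(images=image, return_tensors="pt")
            return self.image_model(**processor).image_embeds.tolist()
        else:
            return None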
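With this change, a request whose body lacks a top-level "inputs" key fails rather than being silently ignored: data.pop("inputs", None) returns None, and the subsequent `"text" in inputs` check raises a TypeError, which is what the commit title refers to. A hedged usage example against the sketch above:

handler = EndpointHandler()

# The old payload shape, {"text": ...} at the top level, no longer works.
embeddings = handler({"inputs": {"text": "a photo of a cat"}})
print(len(embeddings[0]))  # 768, the CLIP ViT-L/14 projection width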