vision
rbanfield committed on
Commit
e36f852
·
1 Parent(s): eddf2f6

continue debugging adventures

Browse files
Files changed (1) hide show
  1. handler.py +6 -5
handler.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from io import BytesIO
2
  import base64
3
 
@@ -15,19 +16,19 @@ class EndpointHandler():
15
 
16
  def __call__(self, data):
17
  inputs = data.pop("inputs", None)
18
- print(inputs)
19
  text_input = inputs["text"] if "text" in inputs else None
20
  image_input = inputs["image"] if "image" in inputs else None
21
 
22
  if text_input:
23
- print("in text mode")
24
- print(text_input)
25
  processor = self.processor(text=text_input, return_tensors="pt", padding=True)
26
  with torch.no_grad():
27
  return self.text_model(**processor).pooler_output.tolist()
28
  elif image_input:
29
- print("in image mode")
30
- print(image_input)
31
  image = Image.open(BytesIO(base64.b64decode(image_input)))
32
  processor = self.processor(images=image, return_tensors="pt")
33
  with torch.no_grad():
 
1
+ import sys
2
  from io import BytesIO
3
  import base64
4
 
 
16
 
17
  def __call__(self, data):
18
  inputs = data.pop("inputs", None)
19
+ print(inputs, file=sys.stderr)
20
  text_input = inputs["text"] if "text" in inputs else None
21
  image_input = inputs["image"] if "image" in inputs else None
22
 
23
  if text_input:
24
+ print("in text mode", file=sys.stderr)
25
+ print(text_input, file=sys.stderr)
26
  processor = self.processor(text=text_input, return_tensors="pt", padding=True)
27
  with torch.no_grad():
28
  return self.text_model(**processor).pooler_output.tolist()
29
  elif image_input:
30
+ print("in image mode", file=sys.stderr)
31
+ print(image_input, file=sys.stderr)
32
  image = Image.open(BytesIO(base64.b64decode(image_input)))
33
  processor = self.processor(images=image, return_tensors="pt")
34
  with torch.no_grad():