sam-brause
committed on
Commit
·
6dbf008
1
Parent(s):
7c8801e
try to fix image error
Browse files- .DS_Store +0 -0
- handler.py +10 -20
.DS_Store
CHANGED
|
Binary files a/.DS_Store and b/.DS_Store differ
|
|
|
handler.py
CHANGED
|
@@ -7,7 +7,7 @@ class EndpointHandler:
|
|
| 7 |
def __init__(self, model_dir):
|
| 8 |
self.model = torch.jit.load(f"{model_dir}/model_scripted_efficientnet.pt")
|
| 9 |
self.model.eval()
|
| 10 |
-
|
| 11 |
self.transform = transforms.Compose([
|
| 12 |
transforms.Resize((224, 224)),
|
| 13 |
transforms.ToTensor(),
|
|
@@ -16,7 +16,7 @@ class EndpointHandler:
|
|
| 16 |
std=[0.229, 0.224, 0.225]
|
| 17 |
)
|
| 18 |
])
|
| 19 |
-
|
| 20 |
self.supported_issues = [
|
| 21 |
"Dark Spots",
|
| 22 |
"Dry Lips",
|
|
@@ -31,36 +31,26 @@ class EndpointHandler:
|
|
| 31 |
]
|
| 32 |
|
| 33 |
def __call__(self, data):
|
| 34 |
-
|
| 35 |
-
Args:
|
| 36 |
-
data (:obj:`Dict[str, Any]`): Dictionary with the following key:
|
| 37 |
-
- "inputs" (str): Base64 encoded string of the image
|
| 38 |
-
Return:
|
| 39 |
-
A :obj:`Dict[str, Any]` with the following keys:
|
| 40 |
-
- "predictions" (List[str]): List of predicted issues
|
| 41 |
-
"""
|
| 42 |
-
if isinstance(data, bytes):
|
| 43 |
image = Image.open(io.BytesIO(data)).convert("RGB")
|
| 44 |
elif isinstance(data, dict) and "inputs" in data:
|
| 45 |
if isinstance(data["inputs"], bytes):
|
| 46 |
image = Image.open(io.BytesIO(data["inputs"])).convert("RGB")
|
| 47 |
-
else
|
| 48 |
-
|
|
|
|
| 49 |
else:
|
| 50 |
raise ValueError("Unsupported input format")
|
| 51 |
|
| 52 |
-
# Preprocess image
|
| 53 |
image_tensor = self.transform(image).unsqueeze(0)
|
| 54 |
-
|
| 55 |
-
# Run inference
|
| 56 |
with torch.no_grad():
|
| 57 |
outputs = self.model(image_tensor)
|
| 58 |
probabilities = torch.nn.functional.softmax(outputs, dim=1)
|
| 59 |
-
|
| 60 |
-
# Post-process results
|
| 61 |
predictions = probabilities.squeeze().tolist()
|
| 62 |
output = [issue for issue, prob in zip(self.supported_issues, predictions) if prob > 0.5]
|
| 63 |
-
|
| 64 |
return {"predictions": output}
|
| 65 |
|
| 66 |
-
EndpointHandler = EndpointHandler
|
|
|
|
| 7 |
def __init__(self, model_dir):
|
| 8 |
self.model = torch.jit.load(f"{model_dir}/model_scripted_efficientnet.pt")
|
| 9 |
self.model.eval()
|
| 10 |
+
|
| 11 |
self.transform = transforms.Compose([
|
| 12 |
transforms.Resize((224, 224)),
|
| 13 |
transforms.ToTensor(),
|
|
|
|
| 16 |
std=[0.229, 0.224, 0.225]
|
| 17 |
)
|
| 18 |
])
|
| 19 |
+
|
| 20 |
self.supported_issues = [
|
| 21 |
"Dark Spots",
|
| 22 |
"Dry Lips",
|
|
|
|
| 31 |
]
|
| 32 |
|
| 33 |
def __call__(self, data):
    """Classify skin issues in a single image.

    Args:
        data: Either the raw image bytes, or a dict with an ``"inputs"``
            key holding the image as raw bytes or a base64-encoded string.

    Returns:
        dict: ``{"predictions": [...]}`` — the supported issues whose
        softmax probability exceeds 0.5.

    Raises:
        ValueError: If the input is not in one of the supported formats.
    """
    import base64  # local import: only needed on the str-input path

    if isinstance(data, bytes):
        raw = data
    elif isinstance(data, dict) and "inputs" in data:
        inputs = data["inputs"]
        if isinstance(inputs, bytes):
            raw = inputs
        elif isinstance(inputs, str):
            # BUG FIX: a base64 string previously fell through without
            # assigning the image, causing a NameError below. Decode it.
            raw = base64.b64decode(inputs)
        else:
            raise ValueError("Unsupported input format")
    else:
        raise ValueError("Unsupported input format")

    image = Image.open(io.BytesIO(raw)).convert("RGB")

    # Preprocess: resize/normalize per self.transform, add batch dim.
    image_tensor = self.transform(image).unsqueeze(0)

    # Run inference without gradient tracking.
    with torch.no_grad():
        outputs = self.model(image_tensor)
        probabilities = torch.nn.functional.softmax(outputs, dim=1)

    # Keep every issue whose probability clears the 0.5 threshold.
    # NOTE(review): softmax probabilities sum to 1, so at most ONE issue
    # can exceed 0.5 — if multi-label output is intended, a sigmoid per
    # class may be what's wanted. TODO confirm with model training setup.
    predictions = probabilities.squeeze().tolist()
    output = [issue for issue, prob in zip(self.supported_issues, predictions) if prob > 0.5]

    return {"predictions": output}
|
| 55 |
|
| 56 |
+
EndpointHandler = EndpointHandler # no-op re-export; presumably kept so the serving toolkit can resolve the handler by module attribute — TODO confirm this is actually required
|