Aryeh Rotberg committed on
Commit ·
2940ce8
1
Parent(s): 336905f
Initialized project.
Browse files- .dockerignore +34 -0
- Dockerfile +15 -0
- api.py +118 -0
- compose.yaml +6 -0
- models/production/unetplusplus_resnet34.pth +3 -0
- requirements.txt +13 -0
.dockerignore
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Include any files or directories that you don't want to be copied to your
|
| 2 |
+
# container here (e.g., local build artifacts, temporary files, etc.).
|
| 3 |
+
#
|
| 4 |
+
# For more help, visit the .dockerignore file reference guide at
|
| 5 |
+
# https://docs.docker.com/go/build-context-dockerignore/
|
| 6 |
+
|
| 7 |
+
**/.DS_Store
|
| 8 |
+
**/__pycache__
|
| 9 |
+
**/.venv
|
| 10 |
+
**/.classpath
|
| 11 |
+
**/.dockerignore
|
| 12 |
+
**/.env
|
| 13 |
+
**/.git
|
| 14 |
+
**/.gitignore
|
| 15 |
+
**/.project
|
| 16 |
+
**/.settings
|
| 17 |
+
**/.toolstarget
|
| 18 |
+
**/.vs
|
| 19 |
+
**/.vscode
|
| 20 |
+
**/*.*proj.user
|
| 21 |
+
**/*.dbmdl
|
| 22 |
+
**/*.jfm
|
| 23 |
+
**/bin
|
| 24 |
+
**/charts
|
| 25 |
+
**/docker-compose*
|
| 26 |
+
**/compose.y*ml
|
| 27 |
+
**/Dockerfile*
|
| 28 |
+
**/node_modules
|
| 29 |
+
**/npm-debug.log
|
| 30 |
+
**/obj
|
| 31 |
+
**/secrets.dev.yaml
|
| 32 |
+
**/values.dev.yaml
|
| 33 |
+
LICENSE
|
| 34 |
+
README.md
|
Dockerfile
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.9.18

# Create an unprivileged user (uid 1000 matches the --chown flags below).
RUN useradd -m -u 1000 user

# Fix: the original created the user but never switched to it, so the
# container still ran as root and the --chown flags were pointless.
# pip installs now land in ~/.local, so put its bin dir on PATH.
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Copy requirements first so dependency installation is cached
# independently of application-code changes.
COPY --chown=user ./requirements.txt requirements.txt

RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app

# Default port expected by Hugging Face Spaces Docker runtime.
EXPOSE 7860

CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]
api.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Tuple
|
| 2 |
+
from io import BytesIO
|
| 3 |
+
|
| 4 |
+
from fastapi import FastAPI, UploadFile, File
|
| 5 |
+
from starlette.responses import StreamingResponse
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
import cv2
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
from torchvision import transforms
|
| 13 |
+
|
| 14 |
+
from PIL import Image, ImageChops
|
| 15 |
+
|
| 16 |
+
import segmentation_models_pytorch as smp
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Configuration shared by model construction and inference.
constants: dict = {
    # segmentation_models_pytorch encoder backbone and its pretrained weights
    'encoder_name': 'resnet34',
    'encoder_weights': 'imagenet',

    # probability cutoff applied to the sigmoid output to binarize the mask
    'sigmoid_threshold': 0.55,

    # trained UNet++ checkpoint (state_dict) loaded by load_model()
    'model_path': 'models/production/unetplusplus_resnet34.pth'
}
+
|
| 28 |
+
def load_model() -> smp.UnetPlusPlus:
    '''
    Lazily construct the UNet++ segmentation network and load its trained
    weights, caching the instance in the module-level ``model`` global so
    the checkpoint is read from disk only once.

    Returns:
        model: smp.UnetPlusPlus
    '''
    global model

    # Fast path: the network was already built on a previous call.
    if model is not None:
        return model

    model = smp.UnetPlusPlus(
        encoder_name=constants['encoder_name'],
        encoder_weights=constants['encoder_weights'],
        in_channels=3,
        classes=1,
    ).to(device)

    # map_location keeps CPU-only hosts working with a GPU-trained checkpoint.
    model.load_state_dict(torch.load(constants['model_path'], map_location=device))

    return model
|
| 45 |
+
def draw_bounding_boxes(mask: np.array) -> Tuple[np.array, float]:
    '''
    Draw axis-aligned bounding boxes around every external contour of a
    binary mask and report the mean bounding-box area.

    Arguments:
        mask: np.array (numpy) -- single-channel mask whose nonzero pixels
            mark the segmented region; cast to uint8 internally.

    Returns:
        Tuple[np.array, float] -- the mask converted to BGR with blue
        rectangles drawn around each contour, and the average bounding-box
        area in pixels (0.0 when no contour is found).
    '''
    mask = mask.astype(np.uint8)

    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Fix: always return a 3-channel image so callers get a consistent
    # array shape (the original returned the raw 2-D mask when no contour
    # was found but a BGR image otherwise).
    mask_bgr = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    if not contours:
        return mask_bgr, 0.0

    areas = []

    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)

        cv2.rectangle(mask_bgr, (x, y), (x + w, y + h), (255, 0, 0), 1)

        # boundingRect guarantees w, h >= 0, so the box area is simply w * h
        # (the original's abs(max_x - x) * abs(max_y - y) reduces to this).
        areas.append(w * h)

    return mask_bgr, sum(areas) / len(areas)
+
|
| 74 |
+
# Preprocessing pipeline: HWC uint8 image -> float CHW tensor in [0, 1],
# resized to the 256x256 resolution the network expects.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Resize((256, 256), antialias=True)])

# Prefer GPU when available; everything else falls back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Lazily-initialized singleton populated by load_model() on first request.
model = None

app = FastAPI(title='Brain MRI Medical Images Segmentation Hugging Face')
+
|
| 83 |
+
@app.post('/predict')
async def predict(file: UploadFile = File(...)):
    '''
    Segment an uploaded brain-MRI image and stream back a PNG overlay.

    Arguments:
        file: uploaded image file; decoded with PIL and converted to RGB.

    Returns:
        StreamingResponse (image/png) of the resized input screen-blended
        with the predicted mask, plus custom headers:
            has_tumor: 'True' when the mask/boxes contain any nonzero pixel
            average_pixel_area: mean bounding-box area, rounded to 2 decimals
    '''
    # Decode the upload and preprocess into a (1, 3, 256, 256) batch on device.
    input_image = Image.open(BytesIO(await file.read())).convert('RGB')
    array_image = np.array(input_image)
    transformed_image = transform(array_image)
    transformed_image = transformed_image.unsqueeze(0)
    transformed_image = transformed_image.to(device)

    model = load_model()
    model.eval()

    with torch.no_grad():
        prediction = model(transformed_image)

    # Binarize the logits via sigmoid + threshold, then scale to 0/255
    # so the mask can be treated as an 8-bit image.
    prediction = torch.sigmoid(prediction)
    prediction = (prediction > constants['sigmoid_threshold']).float()
    prediction = prediction.squeeze()
    prediction = prediction.cpu()
    prediction = prediction.numpy()
    prediction = prediction * 255
    prediction, mean_area = draw_bounding_boxes(prediction)

    # Convert the preprocessed input tensor back to a uint8 HWC image
    # (ToTensor scaled it to [0, 1], hence the * 255).
    transformed_image = transformed_image.cpu()
    transformed_image = transformed_image[0].permute(1, 2, 0)
    transformed_image = transformed_image.numpy() * 255
    transformed_image = transformed_image.astype(np.uint8)

    # Screen-blend the mask (with bounding boxes) over the resized input.
    input_image = Image.fromarray(transformed_image).convert('RGBA')
    predicted_mask = Image.fromarray(prediction).convert('RGBA')
    result = ImageChops.screen(input_image, predicted_mask)

    # Serialize the composite to an in-memory PNG and stream it back.
    bytes_io = BytesIO()
    result.save(bytes_io, format='PNG')
    bytes_io.seek(0)

    return StreamingResponse(bytes_io, media_type='image/png', headers={'has_tumor': f'{prediction.max() > 0}', 'average_pixel_area': f'{round(mean_area, 2)}'})
compose.yaml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
services:
|
| 2 |
+
server:
|
| 3 |
+
build:
|
| 4 |
+
context: .
|
| 5 |
+
ports:
|
| 6 |
+
- 7860:7860
|
models/production/unetplusplus_resnet34.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7073a2fce88a5fb210e43f38e97c8646277a3a6a66359286eb0ebf2930a6429d
|
| 3 |
+
size 104503858
|
requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
--find-links https://download.pytorch.org/whl/torch_stable.html
|
| 2 |
+
|
| 3 |
+
fastapi
|
| 4 |
+
uvicorn[standard]
|
| 5 |
+
numpy
|
| 6 |
+
Pillow
|
| 7 |
+
setuptools
|
| 8 |
+
starlette
|
| 9 |
+
torch==2.0.0+cu118
|
| 10 |
+
torchvision==0.15.0+cu118
|
| 11 |
+
python-multipart
|
| 12 |
+
segmentation-models-pytorch
|
| 13 |
+
opencv-python-headless
|