Commit
·
fab02fe
0
Parent(s):
Initial commit
Browse files- README.md +2 -0
- area_measurement.py +76 -0
- bin/init-local.sh +10 -0
- requirements.txt +6 -0
- utils.py +151 -0
README.md
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coc_ui
|
| 2 |
+
UI for the cumulus expansion computer
|
area_measurement.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from streamlit import session_state as sess
|
| 4 |
+
|
| 5 |
+
from utils import raw_image, overlay_mask_on_image, image_to_file_path, AreaModel
|
| 6 |
+
|
| 7 |
+
import streamlit.components.v1 as components
|
| 8 |
+
|
| 9 |
+
# Page header and introduction.
st.write("### Cumulus Oocyte Complex (COC) area estimation")

# Add a description
st.write("This demo allows the computation of the area in pixels of a cumulus oocyte complex from its photograph.")
st.write("Upload the photograph of your oocyte here:")

# Initialise session state on the first run; Streamlit re-executes the
# script on every interaction, so each key is guarded individually.
for _key in ('image_path', 'last_image_path', 'image'):
    if _key not in sess:
        sess[_key] = None
if 'model' not in sess:
    # Loading the model is expensive, so it is cached in the session.
    sess.model = AreaModel()
|
| 23 |
+
|
| 24 |
+
def compute_area():
    """Run the area model on ``sess.image_path`` and cache the results.

    Stores the raw image, area, ROI and mask in the session state.
    Skips the computation when the current path was already processed
    (``sess.last_image_path`` acts as a one-entry cache key).
    """
    if sess.last_image_path == sess.image_path:
        return  # already computed for this image
    with st.spinner("In progress, should not be more than 10 seconds..."):
        sess.img = raw_image(sess.image_path)
        sess.area, sess.roi_img, sess.mask = sess.model.compute_area(sess.img)
        sess.last_image_path = sess.image_path
|
| 31 |
+
|
| 32 |
+
def update_from_uploader():
    """on_change callback for the file uploader widget.

    Persists the uploaded file to a temporary path and triggers the
    area computation. Does nothing when the uploader is cleared.
    """
    upload = sess["uploaded_file"]
    if upload is None:
        return
    sess.image_path = image_to_file_path(upload)
    compute_area()
|
| 37 |
+
|
| 38 |
+
image = st.file_uploader("Oocyte image", type=["jpg", "jpeg", "png"], key="uploaded_file", on_change=update_from_uploader)

# Results panel: shown only after an image has been processed.
if sess.image_path is not None:
    st.write(f"### Area computed = {sess.area:.2f} pixels")
    original_col, roi_col, mask_col = st.columns(3)
    with original_col:
        st.image(sess.img, caption='Original oocyte', use_column_width=True)
    with roi_col:
        st.image(sess.roi_img, caption='ROI', use_column_width=True)
    with mask_col:
        st.image(overlay_mask_on_image(sess.roi_img, sess.mask), caption='ROI with the deep learning computed mask', use_column_width=True, clamp=True)

# Show examples

st.write("Or select one of the examples here:")
examples = [
    "app/static/oocytes/immature/1.1.png",
    "app/static/oocytes/immature/14.10.png",
    "app/static/oocytes/immature/13.4.png",
]
# Strip the leading "app/" component to get the on-disk path.
examples_to_load = [str(Path(*Path(ex).parts[1:])) for ex in examples]
example_imgs = [raw_image(ex) for ex in examples_to_load]


def _select_example(idx):
    # Closure factory: binds idx at call time so every button targets its
    # own example (avoids the late-binding-closure pitfall).
    def _callback():
        sess.image_path = examples_to_load[idx]
        compute_area()
    return _callback


for i, col in enumerate(st.columns(len(examples))):
    with col:
        st.button("Example " + str(i + 1), on_click=_select_example(i))
        st.image(example_imgs[i], caption='Example ' + str(i + 1), use_column_width=True)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# Footer with project credits and the EU funding acknowledgement.
# Fixes user-facing typos ("Marie Sklodowska-Curie", "Josep Lluís") and
# closes the previously unclosed <p> tags so the HTML is well-formed.
footer_html = """<div style='text-align: center;'>
<p><img src="./app/static/img/EurovaLogo.png" height=100 alt="EUROVA logo"/></p>
<p>Developed with ❤️ at <a href="https://iiia.csic.es"><img src="./app/static/img/iiia-logo.png" height=30 alt="IIIA logo"/> IIIA - CSIC</a> by <a href="https://www.iiia.csic.es/~cerquide">Jesus Cerquides</a>, Giorgios Athanasiou and Josep Lluís Arcos</p>
<p><img src="./app/static/img/MSCA.jpg" height=30 alt="MSCA logo"/> Funded by the European Union Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement 860960</p>
</div>"""
st.markdown(footer_html, unsafe_allow_html=True)
|
bin/init-local.sh
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Bootstrap a local development environment: create and activate a
# virtualenv, install the requirements, and extend PYTHONPATH with this
# project's sources and the sibling cumulus_expansion checkout.
# Intended to be sourced (the `export` and `activate` only affect the
# current shell) -- TODO confirm intended invocation.

# Create the virtualenv only on the first run.
if [[ ! -d .venv ]]; then
python3 -m venv .venv
fi
# sudo apt install libgtk-3-dev
source .venv/bin/activate
pip install -U pip
pip install -r requirements.txt
# Expose ./src and the sibling cumulus_expansion sources to Python.
export PYTHONPATH=$PYTHONPATH:${PWD}/src:${PWD}/../cumulus_expansion/src
#export LD_LIBRARY_PATH=/home/arcos/miniconda3/envs/aix/lib/
|
requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit
|
| 2 |
+
dvc[ssh]
|
| 3 |
+
opencv-python
|
| 4 |
+
scikit-image
|
| 5 |
+
tensorflow
|
| 6 |
+
keras-nightly
|
utils.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import gzip
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
import tempfile
|
| 5 |
+
import cv2
|
| 6 |
+
import tensorflow as tf
|
| 7 |
+
import skimage.morphology
|
| 8 |
+
import skimage.filters.rank
|
| 9 |
+
import skimage.util
|
| 10 |
+
from tensorflow.keras.models import load_model
|
| 11 |
+
|
| 12 |
+
from aix.utils import hardened_dice_coef
|
| 13 |
+
from aix.losses import dice_loss
|
| 14 |
+
|
| 15 |
+
class AreaModel:
    """Wrapper around the pretrained Keras ROI-segmentation model used to
    estimate the area (in pixels) of a cumulus oocyte complex."""

    def __init__(self, model_path="../cumulus_expansion/models/majority_roi_production/majority_roi_production.keras"):
        # Default path is relative to the working directory and assumes a
        # sibling cumulus_expansion checkout -- TODO confirm deploy layout.
        self.model_path = model_path
        self.model = load_model(model_path)
        # Fixed input geometry expected by the network.
        self.IMG_SIZE = (192, 240)
        self.INPUT_CHANNELS = 1
        self.IMG_SHAPE = (self.IMG_SIZE[0], self.IMG_SIZE[1], self.INPUT_CHANNELS)
        self.MASK_SHAPE = (self.IMG_SIZE[0], self.IMG_SIZE[1], 1)

    def compute_area(self, img):
        """Segment ``img`` and return ``(area, roi_img, resized_mask)``.

        ``roi_img`` is the cropped region of interest, ``resized_mask``
        the predicted mask resized back to the ROI's shape, and ``area``
        the sum of the mask values (a soft pixel count).
        """
        roi_img = roi(img)
        roi_shape = roi_img.shape
        #print(roi_img.dtype, roi_shape)
        # Resize/reshape the ROI into the (1, H, W, C) batch the net expects.
        t_img = tensor(roi_img, self.IMG_SHAPE)
        y = self.model.predict(x=t_img)
        mask = y[0]  # first (and only) item of the predicted batch
        #print(roi_shape)
        # Assumes roi_img is 2-D grayscale so roi_shape is (height, width),
        # the form tf.image.resize expects -- TODO confirm.
        resized_mask = tf.image.resize(mask, roi_shape)
        area = np.sum(resized_mask)
        return area, roi_img, resized_mask
|
| 35 |
+
|
| 36 |
+
def image_to_file_path(image):
    """Persist a file-like object to disk and return the file's path.

    The temporary file is deliberately kept (``delete=False``) so the
    returned path can be re-read later, e.g. by ``cv2.imread``.
    """
    tmp = tempfile.NamedTemporaryFile(delete=False)
    try:
        tmp.write(image.read())
    finally:
        tmp.close()
    return tmp.name
|
| 40 |
+
|
| 41 |
+
def raw_image(file_path, remove_alpha=True):
    """Load an image from disk as a grayscale array.

    Args:
        file_path: path to the image file.
        remove_alpha: drop the alpha channel of a 4-channel result.
            Defensive only -- ``cv2.IMREAD_GRAYSCALE`` normally yields a
            2-D array, so the branch is not expected to trigger; the
            original code ignored this parameter entirely.

    Returns:
        2-D uint8 grayscale image array.

    Raises:
        FileNotFoundError: if the file is missing or cannot be decoded
            (cv2.imread signals failure by returning None, which would
            otherwise surface as a cryptic AttributeError downstream).
    """
    img = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {file_path}")
    if remove_alpha and len(img.shape) == 3 and img.shape[2] == 4:
        # The image is in RGBA format: drop the alpha channel.
        img = img[:, :, :3]
    return img
|
| 48 |
+
|
| 49 |
+
def tensor(img, shape):
    """Convert a numpy image into a float32 batch tensor of ``shape``.

    Args:
        img: 2-D ``(H, W)`` or 3-D ``(H, W, C)`` numpy array.
        shape: target ``(height, width, channels)``.

    Returns:
        ``tf.Tensor`` of shape ``(1, *shape)`` and dtype float32.
    """
    if len(img.shape) == 2:
        # Add a trailing channel axis WITHOUT mutating the caller's array.
        # (The original assigned to img.shape, reshaping the argument
        # in place as a side effect visible to the caller.)
        img = img[:, :, None]
    t = tf.convert_to_tensor(img)
    t = tf.image.resize(t, shape[:2])
    t = tf.reshape(t, (1, *shape))
    t = tf.cast(t, tf.float32)
    return t
|
| 58 |
+
|
| 59 |
+
def roi(cv2_img):
    """Crop ``cv2_img`` to the region of interest found by extract_roi.

    The image is normalised to [0, 1] for detection, but the crop is
    taken from the original (unnormalised) image.
    """
    _mask, origin, corner = extract_roi(cv2_img / 255., filled=True, border=.01)
    left, top = origin
    right, bottom = corner
    return cv2_img[top:bottom, left:right]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def overlay_mask_on_image(image, mask, alpha=0.1, mask_color=(0, 255, 0)):
    """
    Overlays a mask on an image.

    Args:
        image (np.array): The original grayscale image.
        mask (np.array): The mask to overlay; indexed as mask[:, :, 0],
            so presumably shaped (H, W, 1) -- TODO confirm with callers.
        alpha (float): The opacity of the mask.
        mask_color (tuple): The color to use for the mask.

    Returns:
        np.array: The image with the mask overlay.
    """
    rgb_image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

    # Paint the mask in mask_color, one channel at a time.
    colored_mask = np.zeros_like(rgb_image)
    for channel, weight in enumerate(mask_color):
        colored_mask[:, :, channel] = weight * mask[:, :, 0]

    # Blend: full-strength image plus alpha-weighted colored mask.
    return cv2.addWeighted(rgb_image, 1, colored_mask, alpha, 0)
|
| 92 |
+
|
| 93 |
+
def local_entropy(im, kernel_size=5, normalize=True):
    """Compute the local (disk-neighbourhood) entropy of an image.

    Args:
        im: grayscale image (converted to ubyte internally).
        kernel_size: radius of the disk structuring element.
        normalize: rescale the result to the full 0-255 uint8 range.

    Returns:
        Entropy map; uint8 when ``normalize`` is True, otherwise the
        raw float output of skimage's rank entropy filter.
    """
    kernel = skimage.morphology.disk(kernel_size)
    entr_img = skimage.filters.rank.entropy(skimage.util.img_as_ubyte(im), kernel)
    if normalize:
        max_entropy = np.max(entr_img)
        if max_entropy > 0:
            entr_img = (entr_img * 255 / max_entropy).astype(np.uint8)
        else:
            # A constant image has zero entropy everywhere; the original
            # divided by zero here, producing NaNs and an invalid cast.
            entr_img = entr_img.astype(np.uint8)
    return entr_img
|
| 100 |
+
|
| 101 |
+
def calc_dim(contour):
    """Bounding box of an OpenCV-style contour (a sequence of [[x, y]]
    points): returns (min_x, max_x, min_y, max_y)."""
    xs = [pt[0][0] for pt in contour]
    ys = [pt[0][1] for pt in contour]
    return min(xs), max(xs), min(ys), max(ys)
|
| 105 |
+
|
| 106 |
+
def calc_size(dim):
    """Area of a (min_x, max_x, min_y, max_y) bounding box."""
    width = dim[1] - dim[0]
    height = dim[3] - dim[2]
    return width * height
|
| 108 |
+
|
| 109 |
+
def calc_dist(dim1, dim2):
    # Stub: presumably intended to measure the distance between two
    # bounding boxes, but never implemented and (from what is visible
    # here) never called. Always returns None.
    return None
|
| 111 |
+
|
| 112 |
+
def extract_roi(img, threshold=135, kernel_size=5, min_fratio=.3, max_sratio=5, filled=True, border=.01):
    """Locate the region of interest in a grayscale image.

    Strategy: threshold a local-entropy map, take the external contours,
    and scan candidates from largest bounding box downwards until one
    passes the artifact filters (fill ratio and side ratio). A
    rectangular mask around the winning contour is returned.

    Args:
        img: grayscale image; callers divide by 255 first, so presumably
            values in [0, 1] -- TODO confirm.
        threshold: binarisation threshold applied to the 0-255 entropy map.
        kernel_size: disk radius for the local-entropy filter.
        min_fratio: minimum contour-fill / bbox-area ratio; candidates at
            or below it are treated as artifacts and skipped.
        max_sratio: maximum bbox side ratio (long/short); candidates at
            or above it are treated as artifacts and skipped.
        filled: draw the returned rectangle filled (True) or as a
            2-px outline (False).
        border: fractional margin added around the detected bbox.

    Returns:
        (mask, (left, top), (right, bottom)) where mask is a uint8 image
        with the ROI rectangle set to 255.

    NOTE(review): if no contour passes the filters (or none exist) the
    loop raises IndexError on contour_indices; a zero-width bbox would
    divide by zero in the side-ratio computation -- confirm inputs make
    these cases impossible.
    """

    entr_img = local_entropy(img, kernel_size=kernel_size)
    _, mask = cv2.threshold(entr_img, threshold, 255, cv2.THRESH_BINARY)

    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    # Bounding boxes and their areas, candidates ordered biggest first.
    contours_d = [calc_dim(c) for c in contours]
    contours_sizes = [calc_size(c) for c in contours_d]
    contour_indices = np.argsort(contours_sizes)[::-1]

    # remove artifacts
    # Seed values force at least one loop iteration.
    fratio = min_fratio
    sratio = max_sratio
    idx = -1
    while fratio<=min_fratio or sratio>=max_sratio:
        idx += 1
        biggest = contour_indices[idx]
        filled_mask = np.zeros(img.shape, dtype=np.uint8)
        filled_mask = cv2.fillPoly(filled_mask, [contours[biggest]], 255)
        # Fraction of the bounding box actually covered by the contour.
        fratio = filled_mask.sum()/255/contours_sizes[biggest]
        cdim = contours_d[biggest]
        # Side ratio of the bbox, normalised to >= 1 below.
        sratio = (cdim[3]-cdim[2])/(cdim[1]-cdim[0])
        if sratio<1: sratio = 1 / sratio
        #print(fratio, sratio, cdim, filled_mask.sum()//255)

    # generating the mask
    filled_mask = np.zeros(img.shape, dtype=np.uint8)

    # Expand the bbox by `border`, clamped to the image bounds.
    # Note: extra is (rows, cols) while the corners are (x, y) pairs.
    extra = ( int(img.shape[0] * border) , int(img.shape[1] * border) )
    origin = (max(0, cdim[0]-extra[1]), max(0, cdim[2]-extra[0]))
    to = (min(img.shape[1]-1 , cdim[1]+extra[1]), min(img.shape[0]-1 , cdim[3]+extra[0]))

    if filled:
        filled_mask = cv2.rectangle(filled_mask, origin, to, 255, -1)
    else:
        filled_mask = cv2.rectangle(filled_mask, origin, to, 255, 2)

    return filled_mask, origin, to
|
| 151 |
+
|