# slm-edge-detection / boundary_extraction.py
# Author: aurioldegbelo — commit 5403d04 ("Create boundary_extraction.py")
# loading all needed libraries
import cv2  # openCV as a general-purpose library for computer vision
import numpy as np
from scipy import ndimage
from skimage import color
from skimage import feature
from skimage import measure
from skimage import segmentation
def edge_detection_mean(image, thres):
shifted = cv2.pyrMeanShiftFiltering(image, 20, 55)
# convert the image to a gray colour scale
image_2d = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
# find edges in the image
# Edge detection identifies points where there are discontinuities (i.e. at which the image brightness changes sharply)
# https://dsp.stackexchange.com/questions/10736/which-sigma-to-use-for-edge-detection
edges = feature.canny(image_2d, sigma=0.3) # the smaller sigma, the more edges detected
#plt.imshow(edges)
# ~edges make the edge to become the background, so that we can compute how far away we are from the edges
dt = ndimage.distance_transform_edt(~edges)
# find coordinates of the peaks
peak_idx = feature.peak_local_max(dt, min_distance=1, indices = False) # indices = false for watershed, indices = true for the distance transform
# kaspar min_distance = 10
# claudia_benin = 5
#peak_idx = feature.peak_local_max(dt, num_peaks=10, indices = False)
# peak_idx = feature.peak_local_max(dt,footprint=np.ones((50, 50)), indices = False)
markers = measure.label(peak_idx) # label connected regions based on the peaks, number of peaks = number of regions in the image
# Say which color range of the image should be labelled
watershed_mask = image_2d.copy()
#thres = 60 # do the segmentation for black regions in the image only (60)
watershed_mask[image_2d <= thres] = 255
watershed_mask[image_2d > thres] = 0
# get labelled regions in the image
labels = segmentation.watershed(-dt, markers, mask=watershed_mask) # black regions as peaks
#print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
mask = labels.copy()
mask[mask > 0] = 255
return mask
def edge_detection_blur(image, thres):
    """Segment dark regions of a BGR image via box-blur smoothing + watershed.

    Same pipeline as ``edge_detection_mean`` but uses a 5x5 box blur instead of
    mean-shift filtering to reduce the number of detected edges.

    Parameters
    ----------
    image : ndarray
        BGR colour image (as read by ``cv2.imread``).
    thres : int
        Grayscale intensity threshold; only pixels with value <= thres
        (the dark regions) are segmented.

    Returns
    -------
    ndarray
        2-D mask with 255 on every labelled (segmented) pixel, 0 elsewhere.
    """
    # Blur the image to reduce the number of edges.
    blurred = cv2.blur(image, (5, 5))
    # convert the image to a gray colour scale
    image_2d = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    # Edge detection identifies points where the image brightness changes sharply.
    # https://dsp.stackexchange.com/questions/10736/which-sigma-to-use-for-edge-detection
    edges = feature.canny(image_2d, sigma=0.3)  # the smaller sigma, the more edges detected
    # ~edges turns the edges into background so we can compute, for every pixel,
    # how far it lies from the nearest edge.
    dt = ndimage.distance_transform_edt(~edges)
    # peak_local_max returns peak COORDINATES; the `indices=False` keyword was
    # deprecated in scikit-image 0.18 and removed in 0.20, so build the boolean
    # peak mask (needed by watershed) explicitly.
    # Tuning notes from earlier runs: kaspar min_distance=10, claudia_benin=5.
    peak_coords = feature.peak_local_max(dt, min_distance=1)
    peak_idx = np.zeros(dt.shape, dtype=bool)
    peak_idx[tuple(peak_coords.T)] = True
    # Label connected peak regions: number of peaks = number of marker regions.
    markers = measure.label(peak_idx)
    # Restrict the watershed to the dark side of the threshold
    # (e.g. thres=60 segments only the black regions of the image).
    watershed_mask = image_2d.copy()
    watershed_mask[image_2d <= thres] = 255
    watershed_mask[image_2d > thres] = 0
    # Flood from the markers over the inverted distance map, inside the mask.
    labels = segmentation.watershed(-dt, markers, mask=watershed_mask)
    # Collapse all labelled regions into a single binary 0/255 mask.
    mask = labels.copy()
    mask[mask > 0] = 255
    return mask