| id (string, length 21–22) | content (string, length 169–2.1k) | max_stars_repo_path (string, length 26–74) |
|---|---|---|
human-eval-bia_data_1
|
def apply_otsu_threshold_and_count_postiive_pixels(image):
"""
Takes an image, applies Otsu's threshold method to it to create a binary image and
counts the positive pixels.
"""
def check(candidate):
import numpy as np
assert candidate(np.asarray([
[0,0,0,0,0],
[1,1,1,0,0],
[1,1,1,0,0],
[1,0,0,0,0],
[0,0,0,1,0],
])) == 8
assert candidate(np.asarray([
[0,0,0,0,0],
[0,1,0,0,0],
[1,2,1,0,0],
[0,1,3,4,0],
[0,1,4,1,0],
])) == 4
assert candidate(np.asarray([
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
])) == 0
import skimage
import numpy as np
binary_image = image > skimage.filters.threshold_otsu(image)
result = np.sum(binary_image)
return result
|
../test_cases/apply_otsu_threshold_and_count_postiive_pixels.ipynb
|
human-eval-bia_data_2
|
def binary_closing(binary_image, radius:int=1):
"""
Applies binary closing to a binary_image with a square footprint with a given radius.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,1,0,1,0,0],
[0,0,1,0,1,0,0],
[0,0,1,0,1,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
]))
reference = np.asarray([
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
])
assert np.array_equal(reference, result)
import numpy as np
import skimage
size = radius * 2 + 1
return skimage.morphology.binary_closing(binary_image, footprint=np.ones((size, size)))
|
../test_cases/binary_closing.ipynb
|
human-eval-bia_data_3
|
def binary_skeleton(binary_image):
"""
Applies skeletonization to a 2D binary image.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,1,1,1,1,1,0],
[0,1,1,1,1,1,0],
[0,1,1,1,1,1,0],
[0,0,0,0,0,0,0],
]))
reference = np.asarray([
[0,0,0,0,0,0,0],
[0,0,0,1,0,0,0],
[0,0,0,1,0,0,0],
[0,0,0,1,0,0,0],
[0,1,1,1,1,1,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
])
# we're accepting a little error because for example
# the center pixel or the ending pixel may vary,
# depending on the implementation
assert np.abs(np.abs(result) - np.abs(reference)).sum() <= 3
from skimage.morphology import skeletonize
return skeletonize(binary_image)
|
../test_cases/binary_skeleton.ipynb
|
human-eval-bia_data_4
|
def bland_altman(dataframe, column1: str, column2: str):
    """
    Takes two specified columns from a given dataframe and applies Bland-Altman-Analysis to them.
    Therefore, it adds two new columns, one called 'mean' containing the mean of the two corresponding values,
    and one called 'diff' containing the difference between the two.

    Note: the dataframe is modified in place and also returned.
    """
    def check(candidate):
        import pandas as pd
        import numpy as np
        df = pd.DataFrame(
            {
                'a': [1, 2, 3, 0],
                'b': [2, 2, 3, 6]
            }
        )
        candidate(df, 'a', 'b')
        assert len(df.columns) == 4
        mean_column = df['mean']
        diff_column = df['diff']
        assert np.array_equal([1.5, 2, 3, 3], mean_column)
        assert np.array_equal([1, 0, 0, 6], diff_column) or \
            np.array_equal([-1, 0, 0, -6], diff_column)
    # The original body imported scipy but never used it; removed.
    data1 = dataframe[column1]
    data2 = dataframe[column2]
    dataframe['mean'] = (data1 + data2) / 2
    dataframe['diff'] = data2 - data1
    return dataframe
|
../test_cases/bland_altman.ipynb
|
human-eval-bia_data_5
|
def combine_columns_of_tables(dataframe1, dataframe2, index):
    """
    This function combines two dataframes and makes sure the data is merged
    using the given index column, which must be present in both dataframes.
    The dataframes should be merged in a way that no data is lost and missing
    fields are filled with NaN.
    """
    def check(candidate):
        import pandas as pd
        import numpy as np
        table1 = pd.DataFrame({
            "label": [1, 2, 3],
            "circularity": [0.3, 0.5, 0.7],
            "elongation": [2.3, 3.4, 1.2],
        })
        table2 = pd.DataFrame({
            "label": [3, 2, 1, 4],
            "area": [22, 32, 25, 18],
            "skewness": [0.5, 0.6, 0.3, 0.3],
        })
        reference = pd.DataFrame({
            "label": [1, 2, 3, 4],
            "circularity": [0.3, 0.5, 0.7, np.nan],
            "elongation": [2.3, 3.4, 1.2, np.nan],
            "area": [25, 32, 22, 18],
            "skewness": [0.3, 0.6, 0.5, 0.3],
        })
        result = candidate(table1, table2, "label")
        comparison = result.compare(reference)
        assert len(comparison.columns) == 0
        assert len(comparison.index) == 0
    import pandas as pd
    # Bug fix: the original hard-coded on='label', ignoring the `index`
    # parameter, so merging on any other key column failed.
    return pd.merge(dataframe1, dataframe2, how='outer', on=index)
|
../test_cases/combine_columns_of_tables.ipynb
|
human-eval-bia_data_6
|
def convex_hull_measure_area(point_cloud):
"""
Take a 3D point_cloud, determines the convex hull around the points and returns the surface area of the convex hull.
"""
def check(candidate):
point_cloud = [[0,1,0],[0,1,1],[0,0,0],[0,0,1],[1,1,0],[1,1,1],[1,0,0],[1,0,1]]
assert abs(candidate(point_cloud) - 6) < 0.001
point_cloud = [[0,1,0],[0,1,1],[0,0,0],[0,0,1],[2,1,0],[2,1,1],[2,0,0],[2,0,1]]
assert abs(candidate(point_cloud) - 10) < 0.001
import vedo
convex_hull = vedo.shapes.ConvexHull(point_cloud)
return convex_hull.area()
|
../test_cases/convex_hull_measure_area.ipynb
|
human-eval-bia_data_7
|
def convolve_images(image, kernel_image):
    """
    Convolve an image with a kernel_image and return the result
    """
    def check(candidate):
        import numpy as np
        image = np.asarray([
            [0,0,0,0,0,0,0],
            [0,1,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,1,0,0],
            [0,0,0,0,0,0,0],
        ])
        kernel = np.asarray([
            [0,1,0],
            [1,1,1],
            [0,2,0],
        ])
        reference = np.asarray([
            [0,1,0,0,0,0,0],
            [1,1,1,0,0,0,0],
            [0,2,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,1,0,0],
            [0,0,0,1,1,1,0],
            [0,0,0,0,2,0,0],
        ])
        assert np.allclose(reference, candidate(image, kernel))
        image = np.asarray([
            [0,0,0,0,0,0,0],
            [0,1,1,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
        ])
        reference = np.asarray([
            [0,1,1,0,0,0,0],
            [1,2,2,1,0,0,0],
            [0,2,2,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
        ])
        assert np.allclose(reference, candidate(image, kernel))
    # scipy's n-dimensional convolution handles boundary padding for us.
    from scipy.ndimage import convolve
    convolved = convolve(image, kernel_image)
    return convolved
|
../test_cases/convolve_images.ipynb
|
human-eval-bia_data_8
|
def count_number_of_touching_neighbors(label_image):
"""
Takes a label image and returns a list of number of touching neighbors
for each labeled object.
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
[0,0,0,0,0],
[0,0,2,2,0],
[0,0,1,3,0],
[0,4,1,0,0],
[0,0,0,0,0],
])
print(candidate(label_image))
assert np.array_equal(candidate(label_image), [3, 2, 2, 1])
import numpy as np
import pyclesperanto_prototype as cle
touch_matrix = cle.generate_touch_matrix(label_image)
cle.set_row(touch_matrix, 0, 0)
cle.set_column(touch_matrix, 0, 0)
return np.asarray(cle.sum_y_projection(touch_matrix))[0,1:]
|
../test_cases/count_number_of_touching_neighbors.ipynb
|
human-eval-bia_data_9
|
def count_objects_over_time(binary_image_list):
    """
    Takes a timelapse (list of binary images), counts the number of connected components and returns the resulting counts as list.
    """
    def check(candidate):
        import numpy as np
        images = [
            np.asarray([
                [1,0,0,0],
                [0,0,0,0],
                [0,0,1,0],
                [0,0,0,0],
            ]),
            np.asarray([
                [1,0,0,0],
                [0,0,0,0],
                [0,0,1,1],
                [0,0,0,0],
            ]),
            np.asarray([
                [1,0,0,0],
                [0,0,0,0],
                [0,0,0,1],
                [0,1,0,0],
            ]),
            np.asarray([
                [1,1,0,0],
                [0,0,0,0],
                [0,0,0,1],
                [0,1,0,0],
            ]),
            np.asarray([
                [0,0,1,0],
                [1,0,0,0],
                [0,0,0,1],
                [0,1,0,0],
            ]),
        ]
        reference = [2,2,3,3,4]
        result = candidate(images)
        assert np.allclose(reference, result)
    from skimage.measure import label
    # Bug fix: the original counted `len(np.unique(labels)) - 1`, which
    # under-counts by one when a frame contains no background (0) pixels.
    # label(..., return_num=True) reports the object count directly.
    # (The original also shadowed the imported `label` with a loop variable.)
    counts = []
    for binary_image in binary_image_list:
        _, number_of_objects = label(binary_image, return_num=True)
        counts.append(number_of_objects)
    return counts
|
../test_cases/count_objects_over_time.ipynb
|
human-eval-bia_data_10
|
def count_overlapping_regions(label_image_1, label_image_2):
"""
Takes two label images and counts how many objects in label_image_1 overlap
with any label in label_image_2 with at least one pixel.
It returns the count of overlapping objects.
"""
def check(candidate):
import numpy as np
label_image_1 = np.asarray([
[0,1,0,0,0],
[0,0,2,0,0],
[0,0,0,3,0],
[0,4,4,0,0],
[0,0,0,5,0],
])
label_image_2 = np.asarray([
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,1,0],
[0,0,1,0,0],
[0,0,0,1,0],
])
result = candidate(label_image_1, label_image_2)
assert result == 3
result = candidate(label_image_2, label_image_1)
assert result == 1
import skimage
import pandas as pd
stats = skimage.measure.regionprops_table(label_image_1, label_image_2, properties=('mean_intensity',))
return (pd.DataFrame(stats)['mean_intensity'] > 0).sum()
|
../test_cases/count_overlapping_regions.ipynb
|
human-eval-bia_data_11
|
def create_umap(dataframe):
"""
Takes a dataframe and computes a UMAP from all columns.
The two UMAP vectors are stored in the dataframe as `umap0` and `umap1`.
"""
def check(candidate):
import pandas as pd
df = pd.DataFrame(
{
"a":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.5],
"b":[0.1,0.2,0.3,0.3,0.4,0.4,0.4,0.5,0.5,0.5,0.6,0.6,0.6],
"c":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.4],
"d":[1.7,2.4,2.4,3.6,3.5,3.9,4.4,4.2,4.1,5.0,5.1,5.4,5.6]
}
)
candidate(df)
expected_columns = ['umap0', 'umap1']
# I'm not sure how to check if the umap columns contain a proper umap,
# but we can check if all expected columns exist.
for ec in expected_columns:
assert ec in df.columns
import umap
embedding = umap.UMAP().fit_transform(dataframe)
dataframe['umap0'] = embedding[:,0]
dataframe['umap1'] = embedding[:,1]
# no return value
|
../test_cases/create_umap.ipynb
|
human-eval-bia_data_12
|
def crop_quarter_image(image):
    """
    Crops out the first half image in both dimensions (width and height).
    The resulting image will be of quarter size compared to the original image.
    """
    def check(candidate):
        import numpy as np
        image = np.asarray([
            [0,0,0,0,0,0],
            [0,1,0,0,2,0],
            [0,0,0,0,0,0],
            [0,0,0,0,0,0],
            [0,4,0,0,3,0],
            [0,0,0,0,0,0],
        ])
        reference = np.asarray([
            [0,0,0],
            [0,1,0],
            [0,0,0],
        ])
        assert np.array_equal(candidate(image), reference)
    height = image.shape[0]
    width = image.shape[1]
    # Bug fix: the first axis is rows (height), the second is columns (width).
    # The original sliced axis 0 by width/2 and axis 1 by height/2, which only
    # happened to work for square images.
    return image[:height // 2, :width // 2]
|
../test_cases/crop_quarter_image.ipynb
|
human-eval-bia_data_13
|
def deconvolve_image(image, kernel_image):
    """
    Deconvolve an image with a kernel_image and return the result.
    """
    def check(candidate):
        import numpy as np
        image = np.asarray([
            [0,1,0,0,0,0,0],
            [1,1,1,0,0,0,0],
            [0,2,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,1,0,0],
            [0,0,0,1,1,1,0],
            [0,0,0,0,2,0,0],
        ])
        kernel = np.asarray([
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,1,0,0,0],
            [0,0,1,1,1,0,0],
            [0,0,0,2,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
        ])
        reference = np.asarray([
            [0,0,0,0,0,0,0],
            [0,1,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,1,0,0],
            [0,0,0,0,0,0,0],
        ])
        assert np.allclose(reference, candidate(image, kernel))
        image = np.asarray([
            [0,1,1,0,0,0,0],
            [1,2,2,1,0,0,0],
            [0,2,2,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
        ])
        reference = np.asarray([
            [0,0,0,0,0,0,0],
            [0,1,1,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
        ])
        assert np.allclose(reference, candidate(image, kernel))
    # Frequency-domain deconvolution: divide the image spectrum by the
    # kernel spectrum and transform back.
    # adapted from: https://stackoverflow.com/questions/17473917/is-there-a-equivalent-of-scipy-signal-deconvolve-for-2d-arrays
    from scipy import fftpack
    image_spectrum = fftpack.fftshift(fftpack.fftn(image))
    kernel_spectrum = fftpack.fftshift(fftpack.fftn(kernel_image))
    ratio = image_spectrum / kernel_spectrum
    return fftpack.fftshift(fftpack.ifftn(fftpack.ifftshift(ratio)))
|
../test_cases/deconvolve_image.ipynb
|
human-eval-bia_data_14
|
def detect_edges(image):
    """
    Applies an edge-detection filter to an image.
    """
    def check(candidate):
        import numpy as np
        image = np.asarray([
            [1,1,2,2,2],
            [1,1,2,2,2],
            [1,1,2,2,2],
            [1,1,2,2,2],
            [1,1,2,2,2],
        ])
        result = candidate(image)
        left_column = result[:,0]
        center_columns = result[:,1:4]
        right_column = result[:,-1]
        assert left_column.max() == 0 and left_column.min() == 0
        assert center_columns.max() != 0 or center_columns.min() != 0
        assert right_column.max() == 0 and left_column.min() == 0
    # The Sobel operator responds to intensity gradients; flat regions
    # (like the outermost columns in the test image) stay zero.
    from scipy.ndimage import sobel
    return sobel(image)
|
../test_cases/detect_edges.ipynb
|
human-eval-bia_data_15
|
def expand_labels_without_overlap(label_image, radius:int=1):
"""
Takes a label_image and enlarges all labels by a given radius, without
labels overwriting each other.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[0,1,1,3,0],
[0,1,1,3,0],
[0,0,0,0,0],
[2,0,0,0,0],
]))
reference = np.asarray([
[0,1,1,3,0],
[1,1,1,3,3],
[1,1,1,3,3],
[2,1,1,3,0],
[2,2,0,0,0],
])
assert np.array_equal(reference, result)
import skimage
return skimage.segmentation.expand_labels(label_image, distance=radius)
|
../test_cases/expand_labels_without_overlap.ipynb
|
human-eval-bia_data_16
|
def extract_surface_measure_area(binary_volume_image):
"""
Take a 3D binary_volume_image, extracts the surface of the white (voxel value != 0) object
and returns the surface area of the object.
"""
def check(candidate):
import numpy as np
binary_volume_image = np.asarray([
[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
],[
[0,0,0,0],
[0,1,1,0],
[0,1,1,0],
[0,0,0,0],
],[
[0,0,0,0],
[0,1,1,0],
[0,1,1,0],
[0,0,0,0],
],[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
],
])
assert abs(candidate(binary_volume_image) - 20) < 1
binary_volume_image = np.zeros((3,3,3))
binary_volume_image[1,1,1] = 1
assert abs(candidate(binary_volume_image) - 3) < 1
import vedo
volume = vedo.Volume(binary_volume_image)
iso_surface = volume.isosurface()
return iso_surface.area()
|
../test_cases/extract_surface_measure_area.ipynb
|
human-eval-bia_data_17
|
def fit_circle(list_of_2d_points):
"""
Implements 2D circle fitting
Input: Collection of 2d points, represented as a list of lists [ [x0,y0], [x1,y1], ... ]
Output: Tuple: xc, yc, radius
"""
def check(candidate):
coordinates = [[1, 0], [-1, 0], [0, 1], [0, -1]]
xc, yc, r = candidate(coordinates)
tolerance = 0.05
assert xc > 0 - tolerance
assert xc < 0 + tolerance
assert yc > 0 - tolerance
assert yc < 0 + tolerance
assert r > 1 - tolerance
assert r < 1 + tolerance
import circle_fit as cf
xc, yc, r, _ = cf.least_squares_circle(list_of_2d_points)
return xc,yc,r
|
../test_cases/fit_circle.ipynb
|
human-eval-bia_data_18
|
def label_binary_image_and_count_labels(binary_image):
    """
    Consumes as input a binary image, applies connected component labeling to it,
    counts the labeled objects and returns their count as single number.
    """
    def check(candidate):
        import numpy as np
        assert candidate(np.asarray([
            [0,0,0,0,0],
            [0,1,0,0,0],
            [0,0,0,0,0],
            [1,0,0,0,0],
            [0,0,0,1,0],
        ])) == 3
        assert candidate(np.asarray([
            [0,0,0,0,0],
            [0,1,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
        ])) == 1
        assert candidate(np.asarray([
            [0,0,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
        ])) == 0
    import skimage
    # Bug fix: counting via `len(np.unique(...)) - 1` under-counts by one when
    # the image contains no background (0) pixels at all. return_num reports
    # the number of labeled objects directly (0 for an all-background image).
    _, number_of_objects = skimage.measure.label(binary_image, return_num=True)
    return number_of_objects
|
../test_cases/label_binary_image_and_count_labels.ipynb
|
human-eval-bia_data_19
|
def label_sequentially(label_image):
"""
Takes a label_image with n labels and relabels the objects,
to make sure all integer labels between 0 and n are used.
No gaps are there.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,1,0,0,0],
[0,0,0,0,0],
[0,4,0,0,0],
[0,0,5,0,0],
[0,0,0,6,0],
]))
# -1 because the background counts
assert len(np.unique(result)) - 1 == 4
assert result.max() == 4
assert result.shape[0] == 5
assert result.shape[1] == 5
import skimage
return skimage.segmentation.relabel_sequential(label_image)[0]
|
../test_cases/label_sequentially.ipynb
|
human-eval-bia_data_20
|
def list_image_files_in_folder(folder_location):
    """
    Lists all image files in a folder.

    Image files are recognized by their extension (.tif, .jpg, .png),
    compared case-insensitively so e.g. 'IMAGE.TIF' is also found.
    """
    def check(candidate):
        list_files = candidate("../example_data/S-BIAD634/images_and_stuff/")
        assert "Ganglioneuroblastoma_8.tif" in list_files
        assert "Ganglioneuroblastoma_9.tif" in list_files
        assert "Ganglioneuroblastoma_10.tif" in list_files
        assert len(list_files) == 3
    import os
    supported_file_endings = (".tif", ".jpg", ".png")
    # splitext is robust against names shorter than the extension, unlike the
    # original fn[-4:] slice, and lower() makes the match case-insensitive.
    return [
        file_name
        for file_name in os.listdir(folder_location)
        if os.path.splitext(file_name)[1].lower() in supported_file_endings
    ]
|
../test_cases/list_image_files_in_folder.ipynb
|
human-eval-bia_data_21
|
def map_pixel_count_of_labels(label_image):
"""
Takes a label_image, determines the pixel-count per label and creates an image where the label values are replaced by the corresponding pixel count.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[1,2,2,0,0],
[1,2,2,0,0],
[1,0,0,3,0],
[0,0,0,4,0],
]))
reference = np.asarray([
[0,0,0,0,0],
[3,4,4,0,0],
[3,4,4,0,0],
[3,0,0,1,0],
[0,0,0,1,0],
])
assert np.array_equal(reference, result)
import pyclesperanto_prototype as cle
return cle.pixel_count_map(label_image)
|
../test_cases/map_pixel_count_of_labels.ipynb
|
human-eval-bia_data_22
|
def mask_image(image, mask):
    """
    Takes a 2D input image and a 2D binary mask image, then applies the mask to the input image and returns the result.
    """
    def check(candidate):
        import numpy as np
        image = [
            [2,2,2,2,2],
            [2,2,3,2,2],
            [2,3,3,3,2],
            [2,2,3,2,2],
            [2,2,2,2,2],
        ]
        mask = [
            [0,0,0,0,0],
            [0,0,1,0,0],
            [0,1,1,1,0],
            [0,0,1,0,0],
            [0,0,0,0,0],
        ]
        reference = [
            [0,0,0,0,0],
            [0,0,3,0,0],
            [0,3,3,3,0],
            [0,0,3,0,0],
            [0,0,0,0,0],
        ]
        masked_image = candidate(image, mask)
        assert np.array_equal(masked_image, reference)
    import numpy as np
    # Element-wise multiplication with a 0/1 mask keeps foreground pixels
    # and zeroes out everything else.
    return np.asarray(image) * np.asarray(mask)
|
../test_cases/mask_image.ipynb
|
human-eval-bia_data_23
|
def maximum_intensity_projection(image):
    """
    Performs a maximum intensity projection along the first axis of an image.
    """
    def check(candidate):
        import numpy as np
        image = np.asarray([
            [0,0,0,0,0,0],
            [0,1,0,0,2,0],
            [0,0,0,0,0,0],
            [0,0,0,0,0,0],
            [0,4,0,0,3,0],
            [0,0,0,0,0,0],
        ])
        reference = np.asarray(
            [0,4,0,0,3,0]
        )
        assert np.array_equal(candidate(image), reference)
    import numpy as np
    # Reduce along axis 0: each output element is the maximum over that axis.
    return np.max(np.asarray(image), axis=0)
|
../test_cases/maximum_intensity_projection.ipynb
|
human-eval-bia_data_24
|
def mean_squared_error(image1, image2):
    """
    Computes the mean-squared-error of two images compared pixel-by-pixel
    """
    def check(candidate):
        image1 = [
            [0,0,0,0,0],
            [0,1,0,0,0],
            [0,0,0,0,0],
            [0,0,0,2,0],
            [0,0,0,0,0],
        ]
        image2 = [
            [0,0,0,0,0],
            [0,1,0,0,0],
            [0,0,0,0,0],
            [0,0,0,2,0],
            [0,0,0,0,0],
        ]
        mse = candidate(image1, image2)
        print(mse)
        assert mse == 0
        image3 = [
            [0,0,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
            [0,0,0,0,0],
        ]
        mse = candidate(image1, image3)
        print(mse)
        assert mse == 5 / 25
    # adapted from : https://stackoverflow.com/questions/16774849/mean-squared-error-in-numpy
    import numpy as np
    difference = np.asarray(image1) - np.asarray(image2)
    return (difference ** 2).mean()
|
../test_cases/mean_squared_error.ipynb
|
human-eval-bia_data_25
|
def mean_std_column(dataframe, column: str):
    """
    Computes the mean average and standard deviation of a specified column
    in a given dataframe and returns these two values.
    """
    def check(candidate):
        import pandas as pd
        df = pd.DataFrame(
            {
                "a":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.5],
                "b":[0.1,0.2,0.3,0.3,0.4,0.4,0.4,0.5,0.5,0.5,0.6,0.6,0.6],
                "c":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.4],
                "d":[1.7,2.4,2.4,3.6,3.5,3.9,4.4,4.2,4.1,5.0,5.1,5.4,5.6]
            }
        )
        result_mean, result_std = candidate(df, "a")
        assert abs(result_mean - 3.938) < 0.001
        assert abs(result_std - 1.170) < 0.001
        result_mean, result_std = candidate(df, "b")
        assert abs(result_mean - 0.415) < 0.001
        assert abs(result_std - 0.151) < 0.001
        result_mean, result_std = candidate(df, "c")
        assert abs(result_mean - 3.931) < 0.001
        assert abs(result_std - 1.160) < 0.001
        result_mean, result_std = candidate(df, "d")
        assert abs(result_mean - 3.946) < 0.001
        assert abs(result_std - 1.168) < 0.001
    import numpy as np
    values = dataframe[column].to_numpy()
    # NOTE: like np.std, ndarray.std uses the population formula (ddof=0),
    # which differs from pandas' Series.std default (ddof=1).
    return values.mean(), values.std()
|
../test_cases/mean_std_column.ipynb
|
human-eval-bia_data_26
|
def measure_aspect_ratio_of_regions(label_image):
"""
Takes a label image and returns a pandas dataframe
with measurements for aspect_ratio of the objects
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
[0,1,1,0,0,3,3],
[0,1,1,0,0,3,3],
[0,2,2,2,2,3,3],
[0,2,2,2,2,3,3],
[0,4,4,4,0,3,3],
[0,4,4,4,0,3,3],
[0,4,4,4,0,0,0],
])
result = candidate(label_image)
print(result['aspect_ratio'])
assert np.allclose(result['aspect_ratio'], [1,2,3,1], atol=0.5)
import skimage
import pandas as pd
stats = pd.DataFrame(skimage.measure.regionprops_table(label_image, properties=('minor_axis_length', 'major_axis_length')))
stats['aspect_ratio'] = stats['major_axis_length'] / stats['minor_axis_length']
return stats
|
../test_cases/measure_aspect_ratio_of_regions.ipynb
|
human-eval-bia_data_27
|
def measure_intensity_of_labels(label_image, intensity_image):
"""
Takes a label image and an intensity image, and returns a list of mean intensities
of all pixels in the intensity image, belonging to a given label.
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
[0,1,0,0,0],
[0,0,0,0,0],
[0,2,2,2,2],
[0,3,3,0,0],
[0,0,0,4,0],
])
intensity_image = np.asarray([
[0,2,0,0,0],
[0,0,0,0,0],
[0,3,3,4,4],
[0,3,3,0,0],
[0,0,0,5,0],
])
result = candidate(label_image, intensity_image)
assert np.array_equal([2,3.5,3,5], result)
import skimage
stats = skimage.measure.regionprops(label_image, intensity_image)
return [s.mean_intensity for s in stats]
|
../test_cases/measure_intensity_of_labels.ipynb
|
human-eval-bia_data_28
|
def measure_intensity_over_time(image_list):
    """
    Takes a timelapse (list of images), measures the average intensity over time and returns the resulting measurements as list.
    """
    def check(candidate):
        import numpy as np
        images = [
            np.asarray([[0,1],[1,1]]),
            np.asarray([[0,2],[2,2]]),
            np.asarray([[0,3],[3,3]]),
            np.asarray([[0,3],[2,2]]),
            np.asarray([[0,2],[2,1]]),
            np.asarray([[0,1],[1,0]]),
        ]
        reference = [0.75, 1.5, 2.25, 7/4, 5/4, 0.5]
        result = candidate(images)
        assert np.allclose(reference, result, atol=0.001)
    import numpy as np
    # Stack frames along axis 0 and average over the spatial axes,
    # leaving one mean value per time point.
    stack = np.asarray(image_list)
    return stack.mean(axis=(1, 2))
|
../test_cases/measure_intensity_over_time.ipynb
|
human-eval-bia_data_29
|
def measure_mean_image_intensity(image):
    """
    Takes an image and returns its mean intensity
    """
    def check(candidate):
        import numpy as np
        result = candidate(np.asarray([
            [1,2,3,4,5],
            [1,2,3,4,5],
            [1,2,3,4,5],
            [1,2,3,4,5],
            [1,2,3,4,5],
        ]))
        assert result == 3
    import numpy as np
    # np.mean accepts array-likes directly, converting them as needed.
    return np.mean(image)
|
../test_cases/measure_mean_image_intensity.ipynb
|
human-eval-bia_data_30
|
def measure_pixel_count_of_labels(label_image):
"""
Takes a label image and returns a list of counts of number of pixels per label.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,1,0,0,0],
[0,0,0,0,0],
[0,2,2,2,0],
[0,3,3,0,0],
[0,0,0,4,0],
]))
assert np.array_equal([1,3,2,1], result)
import skimage
stats = skimage.measure.regionprops(label_image)
return [s.area for s in stats]
|
../test_cases/measure_pixel_count_of_labels.ipynb
|
human-eval-bia_data_31
|
def measure_properties_of_regions(label_image, intensity_image):
"""
Takes a label image and an intensity image, and returns pandas dataframe
with measurements for area, perimeter and mean_intensity.
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
[0,1,0,0,0],
[0,0,0,0,0],
[0,2,2,2,2],
[0,3,3,0,0],
[0,0,0,4,0],
])
intensity_image = np.asarray([
[0,2,0,0,0],
[0,0,0,0,0],
[0,3,3,4,4],
[0,3,3,0,0],
[0,0,0,5,0],
])
result = candidate(label_image, intensity_image)
assert "mean_intensity" in result.columns
assert "area" in result.columns
assert "perimeter" in result.columns
assert len(result.columns) == 3
assert len(result) == 4
import skimage
import pandas as pd
stats = skimage.measure.regionprops_table(label_image, intensity_image, properties=('area', 'perimeter', 'mean_intensity'))
return pd.DataFrame(stats)
|
../test_cases/measure_properties_of_regions.ipynb
|
human-eval-bia_data_32
|
def open_image_read_voxel_size(image_filename):
"""
Reads an image file and return its voxel size in Z-Y-X order.
"""
def check(candidate):
voxel_size = candidate("../example_data/noise.ome.tif")
assert voxel_size[0] == 0.5
assert voxel_size[1] == 0.2
assert voxel_size[2] == 0.2
voxel_size = candidate("../example_data/noise.tif")
assert voxel_size[0] == 0.5
assert voxel_size[1] == 0.2
assert voxel_size[2] == 0.2
from aicsimageio import AICSImage
aics_image = AICSImage(image_filename)
return aics_image.physical_pixel_sizes.Z, \
aics_image.physical_pixel_sizes.Y, \
aics_image.physical_pixel_sizes.X
|
../test_cases/open_image_read_voxel_size.ipynb
|
human-eval-bia_data_33
|
def open_image_return_dimensions(image_file_location):
"""
Opens an image and returns its dimensions
"""
def check(candidate):
shape = candidate("../example_data/blobs.tif")
assert shape[0] == 254
assert shape[1] == 256
from skimage.io import imread
image = imread(image_file_location)
return image.shape
|
../test_cases/open_image_return_dimensions.ipynb
|
human-eval-bia_data_34
|
def open_nifti_image(image_file_location):
    """
    This function loads a nifti image from the file at image_file_location and returns the image data as a numpy array.

    Note: get_fdata() always returns a floating-point array, regardless of
    the on-disk dtype.
    """
    def check(candidate):
        import numpy as np
        reference = np.ones((5, 5, 5), dtype=np.int16)
        image_location = '../example_data/test3d.nii.gz'
        data = candidate(image_location)
        assert np.array_equal(data, reference)
    import nibabel as nib
    # Removed the original's unused and confusing `import numpy as nb`
    # (alias clashed visually with `nib`).
    img = nib.load(image_file_location)
    return img.get_fdata()
|
../test_cases/open_nifti_image.ipynb
|
human-eval-bia_data_35
|
def open_zarr(zarr_file_location):
"""
Opens a zarr file and returns the array
"""
def check(candidate):
array = candidate("../example_data/one-dimensional.zarr")
import numpy as np
assert np.all(array == np.arange(10))
import zarr
array = zarr.load(zarr_file_location)
return array
|
../test_cases/open_zarr.ipynb
|
human-eval-bia_data_36
|
def pair_wise_correlation_matrix(dataframe):
    """
    Takes a pandas dataframe and computes for all columns their Pearson's correlation coefficient
    for all columns in the dataframe. For n columns, this is a n x n matrix of coefficients.
    The matrix is returned as dataframe.
    """
    def check(candidate):
        import pandas as pd
        df = pd.DataFrame(
            {
                "a":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.5],
                "b":[0.1,0.2,0.3,0.3,0.4,0.4,0.4,0.5,0.5,0.5,0.6,0.6,0.6],
                "c":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.4],
                "d":[1.7,2.4,2.4,3.6,3.5,3.9,4.4,4.2,4.1,5.0,5.1,5.4,5.6],
                "e":[1.7,2.4,2.4,3.6,3.5,3.9,4.4,4.2,4.1,5.0,5.1,5.4,5.6]
            }
        )
        result = candidate(df)
        print(result)
        assert 'a' in result.columns
        assert 'b' in result.columns
        assert 'c' in result.columns
        assert 'd' in result.columns
        assert 'e' in result.columns
        assert abs(result['a'].iloc[0] - 1) < 0.0001
        assert abs(result['b'].iloc[1] - 1) < 0.0001
        assert abs(result['c'].iloc[2] - 1) < 0.0001
        assert abs(result['d'].iloc[3] - 1) < 0.0001
        assert abs(result['e'].iloc[4] - 1) < 0.0001
        # columns d and e are identical
        assert abs(result['e'].iloc[3] - 1) < 0.0001
        assert result.size == 25
    # Pearson is pandas' default correlation method; spelled out for clarity.
    correlation_matrix = dataframe.corr(method="pearson")
    return correlation_matrix
|
../test_cases/pair_wise_correlation_matrix.ipynb
|
human-eval-bia_data_37
|
def radial_intensity_profile(image, xc, yc):
    """
    Computes the radial intensity profile of an image around a given coordinate
    Inputs:
    - image: 2d numpy array
    - xc, yc: the center coordinates
    Output:
    - an array containing the average intensities
    """
    def check(candidate):
        import numpy as np
        xc = 5
        yc = 5
        x, y = np.indices(np.zeros(shape=(11,11)).shape)
        distance_image = np.sqrt((x - xc)**2 + (y - yc)**2)
        result = candidate(distance_image, xc, yc)
        reference = [0, 1, 2, 3, 4, 5]
        assert np.allclose(reference, result[0:len(reference)], atol=0.5)
    import numpy as np
    row_coords, col_coords = np.indices(image.shape)
    # Integer radius of every pixel relative to the center (xc, yc).
    radius = np.sqrt((col_coords - xc) ** 2 + (row_coords - yc) ** 2).astype(int)
    # Per-radius intensity sum divided by per-radius pixel count = mean.
    intensity_sums = np.bincount(radius.ravel(), weights=image.ravel())
    pixel_counts = np.bincount(radius.ravel())
    return intensity_sums / pixel_counts
|
../test_cases/radial_intensity_profile.ipynb
|
human-eval-bia_data_38
|
def region_growing_segmentation(image, point):
"""
Segments an image using the region-growing/flood filling
starting from a single point.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,1,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,1,0,0],
[1,1,1,1,1,1,1,1,1,1],
[0,0,1,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,1,0,0],
[1,1,1,1,1,1,1,1,1,1],
[0,0,1,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,1,0,0],
]), (5,5))
assert result.sum() * 1 == 16
assert result.min() == 0
assert result.max() == 1
import skimage
return skimage.segmentation.flood(image, point)
|
../test_cases/region_growing_segmentation.ipynb
|
human-eval-bia_data_39
|
def remove_labels_on_edges(label_image):
"""
Takes a label_image and removes all objects which touch the image border.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[1,2,2,0,0],
[1,2,2,0,0],
[1,0,0,3,0],
[0,0,0,4,0],
]))
# -1 because the background counts
assert len(np.unique(result)) - 1 == 2
assert result.shape[0] == 5
assert result.shape[1] == 5
import skimage
return skimage.segmentation.clear_border(label_image)
|
../test_cases/remove_labels_on_edges.ipynb
|
human-eval-bia_data_40
|
def remove_noise_edge_preserving(image, radius: int = 1):
    """
    Applies an edge-preserving noise-removal filter to an image.
    """
    def check(candidate):
        import numpy as np
        image = np.asarray([
            [1,1,2,2,2],
            [1,2,2,2,2],
            [1,1,2,2,2],
            [1,1,1,2,2],
            [1,1,2,2,2],
        ])
        reference = np.asarray([
            [1,1,2,2,2],
            [1,1,2,2,2],
            [1,1,2,2,2],
            [1,1,2,2,2],
            [1,1,2,2,2],
        ])
        assert np.array_equal(candidate(image), reference)
        assert candidate(image, radius=5).mean() == 2
    # A median filter removes salt-and-pepper style outliers while
    # preserving sharp edges better than a mean filter would.
    from scipy.ndimage import median_filter
    window_size = 2 * radius + 1
    return median_filter(image, size=window_size)
|
../test_cases/remove_noise_edge_preserving.ipynb
|
human-eval-bia_data_41
|
def remove_small_labels(label_image, size_threshold:int=0):
"""
Takes a label_image and removes all objects that are smaller than a given size_threshold.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[1,2,2,0,0],
[1,2,2,0,0],
[1,0,0,3,0],
[0,0,0,4,0],
]), size_threshold=2)
reference = np.asarray([
[0,0,0,0,0],
[1,2,2,0,0],
[1,2,2,0,0],
[1,0,0,0,0],
[0,0,0,0,0],
])
assert np.array_equal(reference, result)
import pyclesperanto_prototype as cle
return cle.exclude_small_labels(label_image, maximum_size=size_threshold)
|
../test_cases/remove_small_labels.ipynb
|
human-eval-bia_data_42
|
def return_hello_world():
    """
    Returns the string "hello world".
    """
    def check(candidate):
        assert candidate() == "hello world"
    greeting = "hello world"
    return greeting
|
../test_cases/return_hello_world.ipynb
|
human-eval-bia_data_43
|
def rgb_to_grey_image_transform(rgb_image, r: float, g: float, b: float):
    """
    Convert an RGB image to a single-channel gray scale image with
    configurable weights r, g and b.
    The weights are normalized to be 1 in sum.
    """
    def check(candidate):
        import numpy as np
        assert np.allclose(candidate([[[0,1,2], [3,3,3]]], 0.5, 0.25, 0.25),
                           [[0.75, 3]], atol=0.0001)
        assert np.allclose(candidate([[[0,1,2], [3,3,3]]], 1, 2, 2),
                           [[1.2, 3]], atol=0.0001)
    import numpy as np
    # Normalize the three weights so they sum to 1.
    total = r + g + b
    normalized_weights = [weight / total for weight in (r, g, b)]
    pixels = np.asarray(rgb_image)
    # Weighted sum over the last (channel) axis, channel by channel.
    grey_image = sum(pixels[..., channel] * normalized_weights[channel]
                     for channel in range(3))
    return grey_image
|
../test_cases/rgb_to_grey_image_transform.ipynb
|
human-eval-bia_data_44
|
def rotate_image_by_90_degrees(image):
    """
    Rotates an image by 90 degrees clockwise around the center of the image.
    """
    import pyclesperanto_prototype as cle
    # A rotation around the z-axis corresponds to an in-plane (XY) rotation.
    return cle.rotate(image, angle_around_z_in_degrees=90)


def check(candidate):
    import numpy as np
    image = np.asarray([
        [0,0,0,0,0,0],
        [0,1,0,0,2,0],
        [0,0,0,0,0,0],
        [0,0,0,0,0,0],
        [0,4,0,0,3,0],
        [0,0,0,0,0,0],
    ])
    reference = np.asarray([
        [0,0,0,0,0,0],
        [0,4,0,0,1,0],
        [0,0,0,0,0,0],
        [0,0,0,0,0,0],
        [0,3,0,0,2,0],
        [0,0,0,0,0,0],
    ])
    assert np.array_equal(candidate(image), reference)
|
../test_cases/rotate_image_by_90_degrees.ipynb
|
human-eval-bia_data_45
|
def subsample_image(image, n: int = 2):
    """
    Subsamples an image by skipping every n'th pixel in X and Y.
    """
    # Strided slicing keeps every n'th row and column.
    return image[::n, ::n]


def check(candidate):
    import numpy as np
    image = np.asarray([
        [1,2,3,4,5,6],
        [7,8,9,0,1,2],
        [3,4,5,6,7,8],
        [9,0,1,2,3,4],
        [5,6,7,8,9,0],
        [1,2,3,4,5,6],
    ])
    reference = np.asarray([
        [1,3,5],
        [3,5,7],
        [5,7,9],
    ])
    assert np.array_equal(candidate(image, n=2), reference)
    reference = np.asarray([
        [1,4],
        [9,2],
    ])
    assert np.array_equal(candidate(image, n=3), reference)
|
../test_cases/subsample_image.ipynb
|
human-eval-bia_data_46
|
def subtract_background_tophat(image, radius: int = 1):
    """
    Applies a top-hat filter with a given radius to an image with dark
    background (low values) and bright foreground (high values).
    """
    from skimage.morphology import white_tophat
    from skimage.morphology import disk
    # White top-hat subtracts the morphological opening from the image,
    # removing background structures larger than the disk-shaped footprint.
    filtered_image = white_tophat(image, footprint=disk(radius))
    return filtered_image


def check(candidate):
    import numpy as np
    image = np.asarray([
        [1,1,1,1,1,1,1,1],
        [1,2,1,1,1,1,1,1],
        [1,1,1,1,1,1,1,1],
        [1,1,1,1,1,1,1,1],
        [1,1,1,1,1,1,1,1],
        [1,1,4,1,1,1,1,1],
        [1,1,1,1,1,1,2,1],
        [1,1,1,1,1,1,1,1],
    ])
    reference = np.asarray([
        [0,0,0,0,0,0,0,0],
        [0,1,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,0,0,0,0,0,0],
        [0,0,3,0,0,0,0,0],
        [0,0,0,0,0,0,1,0],
        [0,0,0,0,0,0,0,0],
    ])
    assert np.array_equal(candidate(image), reference)
    assert np.array_equal(candidate(reference), reference)
    # note: this test case is kept simple to allow also other top-hat
    # implementations (e.g. with a square footprint) to pass
|
../test_cases/subtract_background_tophat.ipynb
|
human-eval-bia_data_47
|
def sum_images(image1, image2):
    """
    Sums two images pixel-by-pixel and returns the result.
    """
    import numpy as np
    # Convert to arrays first so plain nested lists are also supported.
    return np.asarray(image1) + np.asarray(image2)


def check(candidate):
    import numpy as np
    image1 = np.random.random((5,6))
    image2 = np.random.random((5,6))
    sum_image = image1 + image2
    assert np.allclose(candidate(image1, image2), sum_image)
    image1 = [[1,2,3], [4,5,6]]
    image2 = [[5,6,7], [0,1,2]]
    sum_image = [[6,8,10],[4,6,8]]
    assert np.allclose(candidate(image1, image2), sum_image)
|
../test_cases/sum_images.ipynb
|
human-eval-bia_data_48
|
def sum_intensity_projection(image):
    """
    Performs a sum intensity projection along the first axis of an image.
    """
    import numpy as np
    # Summing along axis 0 collapses the first axis pixel-by-pixel.
    return np.asarray(image).sum(axis=0)


def check(candidate):
    import numpy as np
    image = np.asarray([
        [0,0,0,0,0,0],
        [0,1,0,0,3,0],
        [0,0,0,0,0,0],
        [0,0,0,0,0,0],
        [0,4,0,0,6,0],
        [0,0,0,0,0,0],
    ])
    reference = np.asarray(
        [0,5,0,0,9,0]
    )
    assert np.array_equal(candidate(image), reference)
|
../test_cases/sum_intensity_projection.ipynb
|
human-eval-bia_data_49
|
def tiled_image_processing(image, radius, tile_size):
    """
    Apply a maximum filter with a given radius to the image using a
    tile-by-tile strategy. The tile_size denotes the size of the tiles
    in X and Y.

    Note: tiles are processed independently, so the filter does not see
    pixels beyond tile borders; border artefacts are expected.
    """
    import dask.array as da

    # Split the image into square tiles (dask chunks).
    tiles = da.from_array(image, chunks=(tile_size, tile_size))

    def procedure(tile):
        # Maximum filter with a (2*radius+1)^2 footprint, applied per tile.
        from scipy.ndimage import maximum_filter
        return maximum_filter(tile, size=radius * 2 + 1)

    # Set up a lazy result (not computed yet) ...
    tile_map = da.map_blocks(procedure, tiles)
    # ... then actually apply the filter.
    result = tile_map.compute()
    return result


def check(candidate):
    import numpy as np
    image = np.asarray([
        [0,0,0,0,0,0],
        [1,0,0,1,0,0],
        [0,0,0,0,0,0],
        [0,0,2,0,0,0],
        [0,0,0,1,0,0],
        [0,0,0,0,0,0],
    ])
    # this reference image has tile-border artefacts,
    # the maximum-filter does not consider pixels beyond
    # the 3x3 border
    reference = np.asarray([
        [1,1,0,1,1,0],
        [1,1,0,1,1,0],
        [1,1,0,1,1,0],
        [0,2,2,1,1,0],
        [0,2,2,1,1,0],
        [0,0,0,1,1,0],
    ])
    result = candidate(image, 1, 3)
    assert np.array_equal(result, reference)
|
../test_cases/tiled_image_processing.ipynb
|
human-eval-bia_data_50
|
def transpose_image_axes(image):
    """
    Transposes the first two axes of an image.
    """
    import numpy as np
    # swapaxes(0, 1) exchanges exactly the first two axes; for 2D images
    # this equals a full transpose, but unlike `.T` it also honors the
    # documented contract for images with more than two dimensions.
    return np.asarray(image).swapaxes(0, 1)


def check(candidate):
    import numpy as np
    image = np.asarray([
        [0,0,0,0,0,0],
        [0,1,0,0,2,0],
        [0,0,0,0,0,0],
        [0,0,0,0,0,0],
        [0,4,0,0,3,0],
        [0,0,0,0,0,0],
    ])
    reference = np.asarray([
        [0,0,0,0,0,0],
        [0,1,0,0,4,0],
        [0,0,0,0,0,0],
        [0,0,0,0,0,0],
        [0,2,0,0,3,0],
        [0,0,0,0,0,0],
    ])
    assert np.array_equal(candidate(image), reference)
|
../test_cases/transpose_image_axes.ipynb
|
human-eval-bia_data_51
|
def t_test(dataframe, column1: str, column2: str):
    """
    Takes two specified columns from a given dataframe and applies a
    paired T-test to it to determine the p-value.
    """
    import scipy
    data1 = dataframe[column1]
    data2 = dataframe[column2]
    # ttest_rel returns (statistic, p-value); only the p-value is needed.
    return scipy.stats.ttest_rel(data1, data2)[1]


def check(candidate):
    import pandas as pd
    df = pd.DataFrame(
        {
            "a":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.5],
            "b":[0.1,0.2,0.3,0.3,0.4,0.4,0.4,0.5,0.5,0.5,0.6,0.6,0.6],
            "c":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.3,5.4],
            "d":[1.7,2.4,2.4,3.6,3.5,3.9,4.4,4.2,4.1,5.0,5.1,5.4,5.6]
        }
    )
    result = candidate(df, "a","b")
    assert 6e-8 > result > 5e-8
    result = candidate(df, "a","c")
    assert 0.4 > result > 0.3
|
../test_cases/t_test.ipynb
|
human-eval-bia_data_52
|
def workflow_batch_process_folder_count_labels(folder_location):
    """
    This function goes through all .tif image files in a specified folder,
    loads the images and counts labels in each image.
    It returns a dictionary with filenames and corresponding counts.

    NOTE(review): filenames are concatenated with `+`, so folder_location
    must end with a path separator; also assumes a background value is
    present in each image (the smallest unique value is discarded).
    """
    import os
    from skimage.io import imread
    import numpy as np

    supported_fileendings = [".tif", ".jpg", ".png"]
    file_list = [fn for fn in os.listdir(folder_location)
                 if str(fn[-4:]) in supported_fileendings]
    result = {}
    for filename in file_list:
        image = imread(folder_location + filename)
        # Unique pixel values minus the background are the labels.
        labels = np.unique(image).tolist()
        labels.pop(0)
        count = len(labels)
        result[filename] = count
    return result


def check(candidate):
    counts = candidate("../example_data/S-BIAD634/groundtruth/")
    assert counts["Ganglioneuroblastoma_0.tif"] == 300
    assert counts["Ganglioneuroblastoma_1.tif"] == 398
    assert counts["Ganglioneuroblastoma_2.tif"] == 368
    assert counts["Ganglioneuroblastoma_3.tif"] == 378
    assert counts["Ganglioneuroblastoma_4.tif"] == 363
    assert len(counts.keys()) == 5
|
../test_cases/workflow_batch_process_folder_count_labels.ipynb
|
human-eval-bia_data_53
|
def workflow_batch_process_folder_measure_intensity(image_folder_location, labels_folder_location):
    """
    This function goes through all .tif image files in a specified image
    folder and corresponding label images in another labels folder.
    It loads the images and corresponding labels, and measures min, mean
    and max intensity of all labels.
    The function returns a dataframe with five columns: min_intensity,
    mean_intensity, max_intensity, label and filename.

    NOTE(review): assumes folder paths end with a separator and that the
    intensity images are multi-channel (only channel 0 is measured).
    """
    import os
    import pandas as pd
    from skimage.io import imread
    from skimage.measure import regionprops

    supported_fileendings = [".tif", ".jpg", ".png"]
    file_list = [fn for fn in os.listdir(image_folder_location)
                 if str(fn[-4:]) in supported_fileendings]
    result = []
    for filename in file_list:
        # Only the first channel of the intensity image is measured.
        image = imread(image_folder_location + filename)[..., 0]
        labels = imread(labels_folder_location + filename)
        stats = regionprops(labels, intensity_image=image)
        for s in stats:
            result.append({
                "filename": filename,
                "label": s.label,
                "min_intensity": s.min_intensity,
                "mean_intensity": s.mean_intensity,
                "max_intensity": s.max_intensity,
            })
    return pd.DataFrame(result)


def check(candidate):
    label_stats = candidate("../example_data/S-BIAD634/images/", "../example_data/S-BIAD634/groundtruth/")
    assert label_stats['label'].max() == 398
    assert label_stats['min_intensity'].min() == 7
    assert label_stats['max_intensity'].max() == 255
    assert label_stats.size == 9035
    assert abs(label_stats['mean_intensity'].max() - 186) < 1
    assert len(label_stats.columns) == 5
    assert len(label_stats['mean_intensity']) == 1807
    assert "filename" in label_stats.columns
    assert "label" in label_stats.columns
    assert "min_intensity" in label_stats.columns
    assert "mean_intensity" in label_stats.columns
    assert "max_intensity" in label_stats.columns
|
../test_cases/workflow_batch_process_folder_measure_intensity.ipynb
|
human-eval-bia_data_54
|
def workflow_segmentation_counting(image):
    """
    This function segments objects in an image with intensity above
    average and returns their count.
    """
    import skimage
    import numpy as np
    # Threshold at the mean intensity ...
    average_intensity = np.asarray(image).mean()
    binary = image > average_intensity
    # ... then label connected components; labels run 1..n,
    # so the maximum label equals the object count.
    labels = skimage.measure.label(binary)
    return labels.max()


def check(candidate):
    import numpy as np
    result = candidate(np.asarray([
        [0,0,0,0,0],
        [0,1,0,0,0],
        [0,0,0,2,0],
        [0,1,0,0,0],
        [0,0,0,0,0],
    ]))
    assert result == 3
    result = candidate(np.asarray([
        [0,0,0,0,0],
        [0,100,0,90,0],
        [0,0,0,0,0],
        [0,110,0,80,0],
        [0,0,0,0,0],
    ]))
    assert result == 4
|
../test_cases/workflow_segmentation_counting.ipynb
|
human-eval-bia_data_55
|
def workflow_segmentation_measurement_summary(image):
    """
    This function implements a workflow consisting of these steps:
    * threshold intensity input image using Otsu's method
    * label connected components
    * measure area of the labeled objects
    * determine mean area of all objects
    """
    import skimage
    import numpy as np
    # Otsu's method picks the threshold automatically from the histogram.
    binary_image = image > skimage.filters.threshold_otsu(image)
    label_image = skimage.measure.label(binary_image)
    # Collect the per-object area and average it.
    stats = skimage.measure.regionprops(label_image)
    areas = [s.area for s in stats]
    return np.mean(areas)


def check(candidate):
    import numpy as np
    assert candidate(np.asarray([
        [0,0,0,0,0],
        [1,1,1,0,0],
        [1,1,1,0,0],
        [1,1,0,0,0],
        [0,0,0,0,0],
    ])) == 8
    assert candidate(np.asarray([
        [1,1,0,1,1],
        [1,1,0,0,0],
        [0,0,0,1,1],
        [1,1,0,1,1],
        [0,0,0,0,0],
    ])) == 3
    assert candidate(np.asarray([
        [0,0,0,0,0],
        [0,1,0,1,0],
        [0,0,0,0,0],
        [0,0,1,0,0],
        [0,0,0,0,0],
    ])) == 1
|
../test_cases/workflow_segmentation_measurement_summary.ipynb
|
human-eval-bia_data_56
|
def workflow_segment_measure_umap(image):
    """
    This function takes a single channel intensity image,
    segments objects with intensity above half the maximum intensity,
    labels connected components,
    measures area, perimeter, mean_intensity, minor and major axis of the
    labeled objects, and produces a UMAP from the given measurements.
    The two UMAP vectors are saved as `umap0` and `umap1` together with
    the measurements in a dataframe.
    The function returns this dataframe.
    """
    import numpy as np
    import pandas as pd
    import umap
    from skimage.measure import label, regionprops_table

    image = np.asarray(image)
    # Segment: everything brighter than half the maximum is foreground.
    binary = image > 0.5 * image.max()
    labels = label(binary)
    # Measure shape and intensity features per labeled object.
    dataframe = pd.DataFrame(regionprops_table(
        labels, intensity_image=image,
        properties=['area', 'perimeter', 'mean_intensity',
                    'minor_axis_length', 'major_axis_length']))
    # Append a 2D UMAP embedding of the measurements.
    embedding = umap.UMAP().fit_transform(dataframe)
    dataframe['umap0'] = embedding[:, 0]
    dataframe['umap1'] = embedding[:, 1]
    return dataframe


def check(candidate):
    import numpy as np
    images = np.asarray([
        [1,0,0,0,1,0,1,1,0,0],
        [1,0,1,0,0,0,0,0,0,0],
        [1,0,0,0,1,0,1,0,1,0],
        [1,0,1,0,0,0,0,0,1,0],
        [1,0,0,0,0,0,0,0,0,0],
        [1,0,0,1,0,1,1,0,1,0],
        [1,0,0,1,0,1,0,0,1,0],
        [1,0,0,1,0,0,0,1,1,0],
        [1,0,0,0,0,1,0,0,0,0],
        [1,0,1,0,0,0,0,0,0,0],
    ])
    result = candidate(images)
    expected_columns = ['area', 'perimeter', 'mean_intensity',
                        'minor_axis_length', 'major_axis_length',
                        'umap0', 'umap1']
    # I'm not sure how to check if the umap columns contain a proper umap,
    # but we can check if all expected columns exist.
    for ec in expected_columns:
        assert ec in result.columns
|
../test_cases/workflow_segment_measure_umap.ipynb
|
human-eval-bia_data_57
|
def workflow_watershed_segmentation_correction_measurement(image):
    """
    This function implements a workflow consisting of these steps:
    * blurs the image a bit
    * detect local minima in the blurred image
    * apply watershed segmentation flooding the blurred image from the
      detected minima to retrieve a label image
    * remove all objects which touch the image border
    * measure the area of all remaining objects together
    """
    import skimage
    # Small Gaussian blur suppresses noise before minima detection.
    blurred = skimage.filters.gaussian(image, sigma=1)
    minima = skimage.morphology.local_minima(blurred)
    # Each connected minimum becomes one watershed seed.
    spots = skimage.measure.label(minima)
    labels = skimage.segmentation.watershed(blurred, spots)
    # Discard objects that touch the image border ...
    labels_without_border = skimage.segmentation.clear_border(labels)
    # ... and measure the total area of what remains.
    binary = labels_without_border > 0
    return binary.sum()


def check(candidate):
    import numpy as np
    result = candidate(np.asarray([
        [0,0,1,0,0,0,0,1,0,0],
        [0,0,1,0,0,0,0,1,0,0],
        [1,1,1,1,1,1,1,1,1,1],
        [0,0,1,0,0,0,0,1,0,0],
        [0,0,1,0,0,0,0,1,0,0],
        [0,0,1,0,0,0,0,1,0,0],
        [0,0,1,0,0,0,0,1,0,0],
        [1,1,1,1,1,1,1,1,1,1],
        [0,0,1,0,0,0,0,1,0,0],
        [0,0,1,0,0,0,0,1,0,0],
    ]))
    # if only the 4x4 pixels are segmented:
    assert result >= 16
    # if it also considers borders as part of the center object:
    assert result <= 36
|
../test_cases/workflow_watershed_segmentation_correction_measurement.ipynb
|
README.md exists but content is empty.
- Downloads last month
- -