id stringlengths 21 22 | content stringlengths 169 2.1k | max_stars_repo_path stringlengths 26 74 |
|---|---|---|
human-eval-bia_data_1 | def apply_otsu_threshold_and_count_postiive_pixels(image):
"""
Takes an image, applies Otsu's threshold method to it to create a binary image and
counts the positive pixels.
"""
def check(candidate):
import numpy as np
assert candidate(np.asarray([
[0,0,0,0,0],
[1,1,1,0,0]... | ../test_cases/apply_otsu_threshold_and_count_postiive_pixels.ipynb |
human-eval-bia_data_2 | def binary_closing(binary_image, radius:int=1):
"""
Applies binary closing to a binary_image with a square footprint with a given radius.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,1,0,1,0,0],
... | ../test_cases/binary_closing.ipynb |
human-eval-bia_data_3 | def binary_skeleton(binary_image):
"""
Applies skeletonization to a 2D binary image.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,1,1,1,1,1,0],
[0,1,1,1,1,1,0],
... | ../test_cases/binary_skeleton.ipynb |
human-eval-bia_data_4 | def bland_altman(dataframe, column1:str, column2:str):
"""
Takes two specified columns from a given dataframe and applies Bland-Altman-Analysis to them.
Therefore, it adds two new columns, one called 'mean' containing the mean of the two corresponding values,
and one called 'diff' containing the differe... | ../test_cases/bland_altman.ipynb |
human-eval-bia_data_5 | def combine_columns_of_tables(dataframe1, dataframe2, index):
"""
    This function combines two dataframes and makes sure the data is merged
using the given index column, which must be present in both dataframes.
The dataframes should be merged in a way that no data is lost and missing
fields are fille... | ../test_cases/combine_columns_of_tables.ipynb |
human-eval-bia_data_6 | def convex_hull_measure_area(point_cloud):
"""
Take a 3D point_cloud, determines the convex hull around the points and returns the surface area of the convex hull.
"""
def check(candidate):
point_cloud = [[0,1,0],[0,1,1],[0,0,0],[0,0,1],[1,1,0],[1,1,1],[1,0,0],[1,0,1]]
assert abs(candidate(point_c... | ../test_cases/convex_hull_measure_area.ipynb |
human-eval-bia_data_7 | def convolve_images(image, kernel_image):
"""
Convolve an image with a kernel_image and return the result
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,0,0,0,0,0,0],
[0,1,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
... | ../test_cases/convolve_images.ipynb |
human-eval-bia_data_8 | def count_number_of_touching_neighbors(label_image):
"""
Takes a label image and returns a list of number of touching neighbors
for each labeled object.
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
[0,0,0,0,0],
[0,0,2,2,0],
[0,0,1,3,0],
... | ../test_cases/count_number_of_touching_neighbors.ipynb |
human-eval-bia_data_9 | def count_objects_over_time(binary_image_list):
"""
Takes a timelapse (list of binary images), counts the number of connected components and returns the resulting counts as list.
"""
def check(candidate):
import numpy as np
images = [
np.asarray([
[1,0,0,0],
[0,... | ../test_cases/count_objects_over_time.ipynb |
human-eval-bia_data_10 | def count_overlapping_regions(label_image_1, label_image_2):
"""
Takes two label images and counts how many objects in label_image_1 overlap
with any label in label_image_2 with at least one pixel.
It returns the count of overlapping objects.
"""
def check(candidate):
import numpy as np
l... | ../test_cases/count_overlapping_regions.ipynb |
human-eval-bia_data_11 | def create_umap(dataframe):
"""
Takes a dataframe and computes a UMAP from all columns.
The two UMAP vectors are stored in the dataframe as `umap0` and `umap1`.
"""
def check(candidate):
import pandas as pd
df = pd.DataFrame(
{
"a":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,... | ../test_cases/create_umap.ipynb |
human-eval-bia_data_12 | def crop_quarter_image(image):
"""
Crops out the first half image in both dimensions (width and height).
The resulting image will be of quarter size compared to the original image.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,0,0,0,0,0],
[0,1,0,0,2,0],
... | ../test_cases/crop_quarter_image.ipynb |
human-eval-bia_data_13 | def deconvolve_image(image, kernel_image):
"""
Deconvolve an image with a kernel_image and return the result.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,1,0,0,0,0,0],
[1,1,1,0,0,0,0],
[0,2,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,1,0,0... | ../test_cases/deconvolve_image.ipynb |
human-eval-bia_data_14 | def detect_edges(image):
"""
Applies an edge-detection filter to an image.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[1,1,2,2,2],
[1,1,2,2,2],
[1,1,2,2,2],
[1,1,2,2,2],
[1,1,2,2,2],
])
result = candidate(image)
left_colum... | ../test_cases/detect_edges.ipynb |
human-eval-bia_data_15 | def expand_labels_without_overlap(label_image, radius:int=1):
"""
Takes a label_image and enlarges all labels by a given radius, without
labels overwriting each other.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[0,1,1,3,0],
... | ../test_cases/expand_labels_without_overlap.ipynb |
human-eval-bia_data_16 | def extract_surface_measure_area(binary_volume_image):
"""
Take a 3D binary_volume_image, extracts the surface of the white (voxel value != 0) object
and returns the surface area of the object.
"""
def check(candidate):
import numpy as np
binary_volume_image = np.asarray([
[
... | ../test_cases/extract_surface_measure_area.ipynb |
human-eval-bia_data_17 | def fit_circle(list_of_2d_points):
"""
Implements 2D circle fitting
Input: Collection of 2d points, represented as a list of lists [ [x0,y0], [x1,y1], ... ]
Output: Tuple: xc, yc, radius
"""
def check(candidate):
coordinates = [[1, 0], [-1, 0], [0, 1], [0, -1]]
xc, yc, r = candidate(coord... | ../test_cases/fit_circle.ipynb |
human-eval-bia_data_18 | def label_binary_image_and_count_labels(binary_image):
"""
Consumes as input a binary image, applies connected component labeling to it,
counts the labeled objects and returns their count as single number.
"""
def check(candidate):
import numpy as np
assert candidate(np.asarray([
... | ../test_cases/label_binary_image_and_count_labels.ipynb |
human-eval-bia_data_19 | def label_sequentially(label_image):
"""
Takes a label_image with n labels and relabels the objects,
to make sure all integer labels between 0 and n are used.
    leaving no gaps in the numbering.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,1,0,0,0],
... | ../test_cases/label_sequentially.ipynb |
human-eval-bia_data_20 | def list_image_files_in_folder(folder_location):
"""
Lists all image files in a folder.
"""
def check(candidate):
list_files = candidate("../example_data/S-BIAD634/images_and_stuff/")
assert "Ganglioneuroblastoma_8.tif" in list_files
assert "Ganglioneuroblastoma_9.tif" in list_files
as... | ../test_cases/list_image_files_in_folder.ipynb |
human-eval-bia_data_21 | def map_pixel_count_of_labels(label_image):
"""
Takes a label_image, determines the pixel-count per label and creates an image where the label values are replaced by the corresponding pixel count.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],... | ../test_cases/map_pixel_count_of_labels.ipynb |
human-eval-bia_data_22 | def mask_image(image, mask):
"""
Takes a 2D input image and a 2D binary mask image, then applies the mask to the input image and returns the result.
"""
def check(candidate):
import numpy as np
image = [
[2,2,2,2,2],
[2,2,3,2,2],
[2,3,3,3,2],
[2,2,3,2,2],
... | ../test_cases/mask_image.ipynb |
human-eval-bia_data_23 | def maximum_intensity_projection(image):
"""
Performs a maximum intensity projection along the first axis of an image.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,0,0,0,0,0],
[0,1,0,0,2,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,4,0,0,3,0]... | ../test_cases/maximum_intensity_projection.ipynb |
human-eval-bia_data_24 | def mean_squared_error(image1, image2):
"""
Computes the mean-squared-error of two images compared pixel-by-pixel
"""
def check(candidate):
image1 = [
[0,0,0,0,0],
[0,1,0,0,0],
[0,0,0,0,0],
[0,0,0,2,0],
[0,0,0,0,0],
]
image2 = [
[0,0,0,0,0],
... | ../test_cases/mean_squared_error.ipynb |
human-eval-bia_data_25 | def mean_std_column(dataframe, column:str):
"""
Computes the mean average and standard deviation of a specified column
in a given dataframe and returns these two values.
"""
def check(candidate):
import pandas as pd
df = pd.DataFrame(
{
"a":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0... | ../test_cases/mean_std_column.ipynb |
human-eval-bia_data_26 | def measure_aspect_ratio_of_regions(label_image):
"""
Takes a label image and returns a pandas dataframe
with measurements for aspect_ratio of the objects
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
[0,1,1,0,0,3,3],
[0,1,1,0,0,3,3],
[0,2,2,2,... | ../test_cases/measure_aspect_ratio_of_regions.ipynb |
human-eval-bia_data_27 | def measure_intensity_of_labels(label_image, intensity_image):
"""
Takes a label image and an intensity image, and returns a list of mean intensities
of all pixels in the intensity image, belonging to a given label.
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
... | ../test_cases/measure_intensity_of_labels.ipynb |
human-eval-bia_data_28 | def measure_intensity_over_time(image_list):
"""
Takes a timelapse (list of images), measures the average intensity over time and returns the resulting measurements as list.
"""
def check(candidate):
import numpy as np
images = [
np.asarray([[0,1],[1,1]]),
np.asarray([[0,2],[2,2]])... | ../test_cases/measure_intensity_over_time.ipynb |
human-eval-bia_data_29 | def measure_mean_image_intensity(image):
"""
Takes an image and returns its mean intensity
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[1,2,3,4,5],
[1,2,3,4,5],
[1,2,3,4,5],
[1,2,3,4,5],
[1,2,3,4,5],
]))
assert r... | ../test_cases/measure_mean_image_intensity.ipynb |
human-eval-bia_data_30 | def measure_pixel_count_of_labels(label_image):
"""
Takes a label image and returns a list of counts of number of pixels per label.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,1,0,0,0],
[0,0,0,0,0],
[0,2,2,2,0],
[0,3,3,0,0],
... | ../test_cases/measure_pixel_count_of_labels.ipynb |
human-eval-bia_data_31 | def measure_properties_of_regions(label_image, intensity_image):
"""
Takes a label image and an intensity image, and returns pandas dataframe
with measurements for area, perimeter and mean_intensity.
"""
def check(candidate):
import numpy as np
label_image = np.asarray([
[0,1,0,0,0],
... | ../test_cases/measure_properties_of_regions.ipynb |
human-eval-bia_data_32 | def open_image_read_voxel_size(image_filename):
"""
Reads an image file and return its voxel size in Z-Y-X order.
"""
def check(candidate):
voxel_size = candidate("../example_data/noise.ome.tif")
assert voxel_size[0] == 0.5
assert voxel_size[1] == 0.2
assert voxel_size[2] == 0.2
voxel... | ../test_cases/open_image_read_voxel_size.ipynb |
human-eval-bia_data_33 | def open_image_return_dimensions(image_file_location):
"""
Opens an image and returns its dimensions
"""
def check(candidate):
shape = candidate("../example_data/blobs.tif")
assert shape[0] == 254
assert shape[1] == 256
from skimage.io import imread
image = imread(image_file_loc... | ../test_cases/open_image_return_dimensions.ipynb |
human-eval-bia_data_34 | def open_nifti_image(image_file_location):
"""
This function loads a nifti image from the file at image_location and returns the image data as a numpy array.
"""
def check(candidate):
import numpy as np
reference = np.ones((5, 5, 5), dtype=np.int16)
image_location = '../example_data/test3d.n... | ../test_cases/open_nifti_image.ipynb |
human-eval-bia_data_35 | def open_zarr(zarr_file_location):
"""
Opens a zarr file and returns the array
"""
def check(candidate):
array = candidate("../example_data/one-dimensional.zarr")
import numpy as np
assert np.all(array == np.arange(10))
import zarr
array = zarr.load(zarr_file_location)
return arra... | ../test_cases/open_zarr.ipynb |
human-eval-bia_data_36 | def pair_wise_correlation_matrix(dataframe):
"""
Takes a pandas dataframe and computes for all columns their Pearson's correlation coefficient
for all columns in the dataframe. For n columns, this is a n x n matrix of coefficients.
The matrix is returned as dataframe.
"""
def check(candidate):
... | ../test_cases/pair_wise_correlation_matrix.ipynb |
human-eval-bia_data_37 | def radial_intensity_profile(image, xc, yc):
"""
Computes the radial intensity profile of an image around a given coordinate
Inputs:
- image: 2d numpy array
    - xc, yc: the center coordinates
Output:
- an array containing the average intensities
"""
def check(candidate):
import numpy ... | ../test_cases/radial_intensity_profile.ipynb |
human-eval-bia_data_38 | def region_growing_segmentation(image, point):
"""
Segments an image using the region-growing/flood filling
starting from a single point.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,1,0,0,0,0,1,0,0],
[0,0,1,0,0,0,0,1,0,0],
[1,1... | ../test_cases/region_growing_segmentation.ipynb |
human-eval-bia_data_39 | def remove_labels_on_edges(label_image):
"""
Takes a label_image and removes all objects which touch the image border.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[1,2,2,0,0],
[1,2,2,0,0],
[1,0,0,3,0],
[0,0,0... | ../test_cases/remove_labels_on_edges.ipynb |
human-eval-bia_data_40 | def remove_noise_edge_preserving(image, radius:int=1):
"""
Applies an edge-preserving noise-removal filter to an image.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[1,1,2,2,2],
[1,2,2,2,2],
[1,1,2,2,2],
[1,1,1,2,2],
[1,1,2,2,2],
])
... | ../test_cases/remove_noise_edge_preserving.ipynb |
human-eval-bia_data_41 | def remove_small_labels(label_image, size_threshold:int=0):
"""
Takes a label_image and removes all objects that are smaller than a given size_threshold.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[1,2,2,0,0],
[1,2,2,0,0],
... | ../test_cases/remove_small_labels.ipynb |
human-eval-bia_data_42 | def return_hello_world():
"""
Returns the string "hello world".
"""
def check(candidate):
assert candidate() == "hello world"
return "hello world" | ../test_cases/return_hello_world.ipynb |
human-eval-bia_data_43 | def rgb_to_grey_image_transform(rgb_image, r:float, g:float, b:float):
"""
Convert an RGB image to a single-channel gray scale image with
configurable weights r, g and b.
The weights are normalized to be 1 in sum.
"""
def check(candidate):
import numpy as np
assert np.allclose(candidate([[... | ../test_cases/rgb_to_grey_image_transform.ipynb |
human-eval-bia_data_44 | def rotate_image_by_90_degrees(image):
"""
Rotates an image by 90 degrees clockwise around the center of the image.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,0,0,0,0,0],
[0,1,0,0,2,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,4,0,0,3,0],
... | ../test_cases/rotate_image_by_90_degrees.ipynb |
human-eval-bia_data_45 | def subsample_image(image, n:int=2):
"""
Subsamples an image by skipping every n'th pixel in X and Y.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[1,2,3,4,5,6],
[7,8,9,0,1,2],
[3,4,5,6,7,8],
[9,0,1,2,3,4],
[5,6,7,8,9,0],
[1,2,3,... | ../test_cases/subsample_image.ipynb |
human-eval-bia_data_46 | def subtract_background_tophat(image, radius:int=1):
"""
Applies a top-hat filter with a given radius to an image with dark background (low values) and bright foreground (high values).
"""
def check(candidate):
import numpy as np
image = np.asarray([
[1,1,1,1,1,1,1,1],
[1,2,1,1,1,1... | ../test_cases/subtract_background_tophat.ipynb |
human-eval-bia_data_47 | def sum_images(image1, image2):
"""
Sums two images pixel-by-pixel and returns the result
"""
def check(candidate):
import numpy as np
image1 = np.random.random((5,6))
image2 = np.random.random((5,6))
sum_image = image1 + image2
assert np.allclose(candidate(image1, image2), sum_im... | ../test_cases/sum_images.ipynb |
human-eval-bia_data_48 | def sum_intensity_projection(image):
"""
    Performs a sum intensity projection along the first axis of an image.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,0,0,0,0,0],
[0,1,0,0,3,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,4,0,0,6,0],
... | ../test_cases/sum_intensity_projection.ipynb |
human-eval-bia_data_49 | def tiled_image_processing(image, radius, tile_size):
"""
Apply a maximum filter with a given radius to the image using a tile-by-tile strategy.
The tile_size denotes the size of the tiles in X and Y.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,0,0,0,0,0],
... | ../test_cases/tiled_image_processing.ipynb |
human-eval-bia_data_50 | def transpose_image_axes(image):
"""
Transposes the first two axes of an image.
"""
def check(candidate):
import numpy as np
image = np.asarray([
[0,0,0,0,0,0],
[0,1,0,0,2,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,4,0,0,3,0],
[0,0,0,0,0,0],
])
re... | ../test_cases/transpose_image_axes.ipynb |
human-eval-bia_data_51 | def t_test(dataframe, column1:str, column2:str):
"""
Takes two specified columns from a given dataframe and applies a paired T-test to it to determine the p-value.
"""
def check(candidate):
import pandas as pd
df = pd.DataFrame(
{
"a":[1.6,2.3,2.6,3.7,3.4,3.9,4.3,4.3,4.0,5.1,5.2,5.... | ../test_cases/t_test.ipynb |
human-eval-bia_data_52 | def workflow_batch_process_folder_count_labels(folder_location):
"""
This functions goes through all .tif image files in a specified folder,
    loads the images and counts the labels in each image.
It returns a dictionary with filenames and corresponding counts.
"""
def check(candidate):
counts = candid... | ../test_cases/workflow_batch_process_folder_count_labels.ipynb |
human-eval-bia_data_53 | def workflow_batch_process_folder_measure_intensity(image_folder_location, labels_folder_location):
"""
This functions goes through all .tif image files in a specified image folder
and corresponding label images in another labels folder.
It loads the images and corresponding labels, and measures min, ... | ../test_cases/workflow_batch_process_folder_measure_intensity.ipynb |
human-eval-bia_data_54 | def workflow_segmentation_counting(image):
"""
This function segments objects in an image with intensity above average
and returns their count.
"""
def check(candidate):
import numpy as np
result = candidate(np.asarray([
[0,0,0,0,0],
[0,1,0,0,0],
[0,0,0,2,0],
... | ../test_cases/workflow_segmentation_counting.ipynb |
human-eval-bia_data_55 | def workflow_segmentation_measurement_summary(image):
"""
This function implements a workflow consisting of these steps:
* threshold intensity input image using Otsu's method
* label connected components
* measure area of the labeled objects
* determine mean area of all objects
"""
def chec... | ../test_cases/workflow_segmentation_measurement_summary.ipynb |
human-eval-bia_data_56 | def workflow_segment_measure_umap(image):
"""
This function takes a single channel intensity image,
segments objects with intensity above half the maximum intensity,
labels connected components,
measures area, perimeter, mean_intensity, minor and major axis of the labeled objects,
and produc... | ../test_cases/workflow_segment_measure_umap.ipynb |
human-eval-bia_data_57 | def workflow_watershed_segmentation_correction_measurement(image):
"""
This function implements a workflow consisting of these steps:
* blurs the image a bit
* detect local minima in the blurred image
* apply watershed segmentation flooding the blurred image from the
detected minima to retrie... | ../test_cases/workflow_watershed_segmentation_correction_measurement.ipynb |
README.md exists but content is empty.
- Downloads last month
- 12