cerquide committed on
Commit
64ea7b2
·
1 Parent(s): a479402

Added aix sources

Browse files
src/aix/__init__.py ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os
3
+ from pathlib import Path
4
+ import json
5
+ import os
6
+ import dataclasses
7
+ from dataclasses import dataclass
8
+
9
+ from typing import Any, Optional
10
+ import math
11
+
12
+ import logging
13
+ from logging import NullHandler, StreamHandler
14
+
15
+ import numpy as np
16
+ import cv2
17
+ import tensorflow as tf
18
+
19
+ __import__('pkg_resources').declare_namespace(__name__)
20
+
21
# Set default logging handler to avoid "No handler found" warnings.

logger = logging.getLogger(__name__)
if not logger.hasHandlers():
    # NOTE(review): library packages conventionally install only a
    # NullHandler and leave handlers/levels to the application; the
    # StreamHandler(stdout) and INFO level here make the whole package
    # log to stdout by default — confirm this is intended.
    logger.addHandler(NullHandler())
    logger.addHandler(StreamHandler(sys.stdout))
    logger.setLevel('INFO')
28
+
29
# environment variables:
#   DATAPATH:   path of the data files (overrides the default below)
#   AIX_DATA:   root of the data tree (default: "data")
#   AIX_MODELS: root of the models tree (default: "models")
#   AIX_EVALS:  root of the evaluation tree (default: "eval")

DATA_FOLDER = os.environ.get("DATAPATH", "/data/eurova/cumulus_database/")

AIX_DATA = Path(os.environ.get("AIX_DATA", "data"))

AIX_MODELS = Path(os.environ.get("AIX_MODELS", "models"))
AIX_EVALS = Path(os.environ.get("AIX_EVALS", "eval"))

AIX_DATASETS = AIX_DATA / "datasets"

# Maturity stage labels, also used as on-disk subfolder names.
MATURE = "mature"
IMMATURE = "immature"
54
+
55
def init_path(output_path: Path, stages=(IMMATURE, MATURE)):
    """Create *output_path* (with parents) and one subfolder per stage.

    Fix: the ``stages`` default used to be a shared mutable list; it is
    now an immutable tuple with the same contents.

    Args:
        output_path: root folder to create.
        stages: iterable of subfolder names to create inside it.
    """
    output_path.mkdir(parents=True, exist_ok=True)
    for stage in stages:
        (output_path / stage).mkdir(exist_ok=True)
59
+
60
# An item is a generalization which includes as particular cases an oocyte
# image, an oocyte mask, and patches of those.
@dataclass
class Item:
    """One addressable image file (oocyte photo or mask) of a dataset."""

    dataset: Any          # owning Dataset (provides rooted_*_path attributes)
    mask: bool            # True -> file resolved under the annotations tree
    index: str            # oocyte identifier, used as the file stem
    stage: str = ""       # maturity subfolder; "" means no stage subfolder
    extension: str = ".png"

    def filename(self):
        """Return the path (as str) of the file backing this item."""
        if self.mask:
            base = Path(self.dataset.rooted_annotations_path)
        else:
            base = Path(self.dataset.rooted_images_path)
        if self.stage != "":
            base = base / self.stage
        return str(base / (self.index + self.extension))

    def raw_image(self, opts=cv2.IMREAD_UNCHANGED, remove_alpha=True):
        """Load the image as stored; optionally strip an RGBA alpha channel.

        Fixes:
        - A missing/unreadable file now raises FileNotFoundError instead of
          crashing with AttributeError (cv2.imread returns None, not raise).
        - The remove_alpha flag is now honored (previously it was ignored).
        """
        img = cv2.imread(self.filename(), opts)
        if img is None:
            raise FileNotFoundError("Cannot read image file: " + self.filename())
        if remove_alpha and len(img.shape) == 3 and img.shape[2] == 4:
            print(self.filename() + " is in RGBA format. We remove the A")
            img = img[:, :, :3]
        return img

    def float_image(self, opts=cv2.IMREAD_UNCHANGED):
        """Image as float32, still in the 0-255 range."""
        return self.raw_image(opts).astype(np.float32)

    def norm_image(self, opts=cv2.IMREAD_UNCHANGED):
        """Image as float32 scaled to [0, 1]."""
        return self.float_image(opts) / 255.

    def uint_norm_image(self, opts=cv2.IMREAD_UNCHANGED):
        """Raw image divided by 255 (numpy true division -> float array)."""
        return self.raw_image(opts) / 255.

    def tensor(self, shape):
        """Grayscale image as a float32 TF tensor resized to shape[:2]."""
        img = self.raw_image(cv2.IMREAD_GRAYSCALE)
        if len(img.shape) == 2:
            # Add the trailing channel axis that tf.image.resize expects.
            img.shape = (img.shape[0], img.shape[1], 1)
        t = tf.convert_to_tensor(img)
        t = tf.image.resize(t, shape[:2])
        t = tf.cast(t, tf.float32)
        return t

    def norm_tensor(self, shape):
        """tensor() scaled to [0, 1]."""
        return self.tensor(shape) / 255.

    def write(self, img):
        """Write *img* (uint8 required) to this item's file location."""
        assert img.dtype == np.uint8
        print("Writing image ", self.filename())
        cv2.imwrite(self.filename(), img)

    def copy(self):
        """Return a field-by-field copy of this item."""
        return dataclasses.replace(self)
117
+
118
class Dataset:
    """A named collection of oocyte images (and optionally masks) on disk.

    Files are laid out as rooted_images_path/<stage>/<index><extension>,
    with masks under rooted_annotations_path when annotations exist.
    """

    def __init__(self, name, oocytes, images_path: str, annotations_path: Optional[str] = None,
                 image_extension=".png", stages=(IMMATURE, MATURE), create_folders=False):
        """Build a dataset description and validate (or create) its folders.

        Args:
            name: dataset identifier.
            oocytes: list of oocyte indices (file stems).
            images_path: image tree; absolute, or relative to AIX_DATA.
            annotations_path: mask tree (same rooting rules), or None.
            image_extension: extension of the image files.
            stages: maturity subfolders expected under each tree.
            create_folders: create the trees instead of requiring them.

        Raises:
            Exception: when create_folders is False and a stage folder
                is missing.
        """
        self.name = name
        self.oocytes = oocytes
        self.stages = stages
        print("Number of oocytes for dataset ", name, ":", len(self.oocytes))
        # Root paths with subfolders immature / mature.
        if os.path.isabs(images_path):
            rooted_images_path = Path(images_path)
        else:
            rooted_images_path = AIX_DATA / images_path

        if annotations_path is not None:
            if os.path.isabs(annotations_path):
                rooted_annotations_path = Path(annotations_path)
            else:
                rooted_annotations_path = AIX_DATA / annotations_path
        else:
            rooted_annotations_path = None

        if create_folders:
            init_path(rooted_images_path, stages)
            if rooted_annotations_path is not None:
                init_path(rooted_annotations_path, stages)
        else:
            # Validate that every expected stage folder already exists.
            for subfold in stages:
                if not (rooted_images_path / subfold).is_dir():
                    raise Exception("Path " + str(rooted_images_path) + " not found.")
                if rooted_annotations_path is not None and not (rooted_annotations_path / subfold).is_dir():
                    raise Exception("Path " + str(rooted_annotations_path) + " not found.")

        self.images_path = images_path
        self.annotations_path = annotations_path
        self.rooted_images_path = rooted_images_path
        self.rooted_annotations_path = rooted_annotations_path
        self.extension = image_extension

    @staticmethod
    def from_folder(name, folder_name, images_path, annotations_path, image_extension=".png"):
        """Build a dataset whose oocyte list is scanned from *folder_name*."""
        if not Path(folder_name).is_dir():
            raise Exception("Path " + folder_name + " not found.")

        oocytes = sorted(f.stem for f in Path(folder_name).iterdir() if f.suffix == image_extension)

        return Dataset(name, oocytes, images_path, annotations_path, image_extension)

    @staticmethod
    def from_file(file_name: Path):
        """Load a dataset description from a JSON file written by save().

        Fix: the JSON file is now read inside a context manager; the
        original left the file handle open.
        """
        if not Path(file_name).is_file():
            raise Exception("File " + str(file_name) + " not found")
        with open(file_name) as f:
            data = json.load(f)
        if "image_extension" not in data:
            data['image_extension'] = ".png"
        dataset = Dataset(data["name"], data["oocytes"], data["images"], data["annotations"], data["image_extension"])
        return dataset

    @staticmethod
    def create(name, images_path: str, annotations_path: str,
               image_extension=".png", stages=(IMMATURE, MATURE)):
        """Create a new, empty dataset and its folder trees on disk.

        Fix: the *stages* argument was previously accepted but silently
        ignored; it is now forwarded to the constructor.
        """
        return Dataset(name, [], images_path, annotations_path, image_extension,
                       stages=stages, create_folders=True)

    def num_images(self):
        """Total number of images: one per oocyte per stage."""
        return len(self.stages) * len(self.oocytes)

    def save(self, file_name):
        """Serialize this dataset description to a JSON file."""
        d = {"name": self.name, "oocytes": self.oocytes,
             "image_extension": self.extension,
             "images": str(self.images_path),
             "annotations": str(self.annotations_path)}
        with open(file_name, "w") as f:
            f.write(json.dumps(d))

    def has_annotations(self):
        """True when a mask tree was configured for this dataset."""
        return self.annotations_path is not None

    def new_item(self, mask=False, stage="", index=""):
        """Create an Item bound to this dataset."""
        return Item(self, mask, index=index, stage=stage, extension=self.extension)

    def cv_item_iterator(self, k=10, seed=42, maturity=None):
        """Yield k cross-validation folds of (x_train, x_test, y_train, y_test) Items.

        The oocyte order is shuffled deterministically with *seed*; when
        *maturity* is None, both stages of each oocyte are included (and
        always land in the same fold).
        """
        random_arr = np.arange(len(self.oocytes))
        np.random.seed(seed)
        np.random.shuffle(random_arr)
        oocyte_items = []
        mask_items = []
        for i in random_arr:
            oocyte_index = self.oocytes[i]
            if maturity is None or maturity == IMMATURE:
                oocyte_items.append(self.new_item(mask=False, stage=IMMATURE, index=oocyte_index))
                mask_items.append(self.new_item(mask=True, stage=IMMATURE, index=oocyte_index))
            if maturity is None or maturity == MATURE:
                oocyte_items.append(self.new_item(mask=False, stage=MATURE, index=oocyte_index))
                mask_items.append(self.new_item(mask=True, stage=MATURE, index=oocyte_index))

        fold_sizes = np.repeat(len(self.oocytes) // k, k)
        # Adjust sizes when len is not a multiple of k.
        fold_sizes[:len(self.oocytes) % k] += 1
        if maturity is None:
            fold_sizes *= 2  # both stages per oocyte
        num_fold = np.repeat(np.arange(k), fold_sizes)
        oocyte_items = np.array(oocyte_items)
        mask_items = np.array(mask_items)

        for fold in range(k):
            x_train = oocyte_items[num_fold != fold]
            y_train = mask_items[num_fold != fold]
            x_test = oocyte_items[num_fold == fold]
            y_test = mask_items[num_fold == fold]
            yield x_train, x_test, y_train, y_test

    @classmethod
    def tf_dataset_from_items(cls, x, y, image_shape, mask_shape):
        """Wrap paired (image, mask) Items into a tf.data.Dataset."""
        def f():
            for x_item, y_item in zip(x, y):
                yield x_item.tensor(image_shape), y_item.norm_tensor(mask_shape)

        return tf.data.Dataset.from_generator(f,
                                              output_signature=(tf.TensorSpec(shape=image_shape, dtype=tf.float32),
                                                                tf.TensorSpec(shape=mask_shape, dtype=tf.float32)))

    def cv_tf_dataset_iterator(self, image_shape, mask_shape, k=10, seed=42, maturity=None):
        """Yield ((x_train, y_train), train_ds, (x_test, y_test), test_ds) per fold."""
        for x_train, x_test, y_train, y_test in self.cv_item_iterator(k=k, seed=seed, maturity=maturity):
            train = self.tf_dataset_from_items(x_train, y_train, image_shape, mask_shape)
            test = self.tf_dataset_from_items(x_test, y_test, image_shape, mask_shape)
            yield (x_train, y_train), train, (x_test, y_test), test

    def train_test_iterator(self, k=10, seed=42):
        """Yield k folds of (x_train, x_test, y_train, y_test) file-path arrays.

        Fix: the fold assignment used a hardcoded ``np.arange(10)``, which
        crashed (or mislabeled folds) whenever k != 10; it now uses
        ``np.arange(k)`` like cv_item_iterator.

        NOTE(review): the generated paths have no file extension appended,
        and the ``* 2`` assumes exactly two stages — confirm callers.
        """
        random_arr = np.arange(len(self.oocytes))
        np.random.seed(seed)
        np.random.shuffle(random_arr)

        image_files = []
        mask_files = []
        for idx in random_arr:
            for stage in self.stages:
                image_files.append((Path(self.rooted_images_path) / stage / (self.oocytes[idx])).as_posix())
                mask_files.append((Path(self.rooted_annotations_path) / stage / (self.oocytes[idx])).as_posix())

        fold_sizes = np.repeat(len(self.oocytes) // k, k)
        # Adjust sizes when len is not a multiple of k.
        fold_sizes[:len(self.oocytes) % k] += 1

        num_fold = np.repeat(np.arange(k), fold_sizes * 2)
        image_files = np.array(image_files)
        mask_files = np.array(mask_files)

        for fold in range(k):
            x_train = image_files[num_fold != fold]
            y_train = mask_files[num_fold != fold]
            x_test = image_files[num_fold == fold]
            y_test = mask_files[num_fold == fold]
            yield x_train, x_test, y_train, y_test

    def train_test_split(self, percent=90, seed=42):
        """Split into two sub-datasets of ~percent% train / remainder test.

        Fix: the shuffled permutation was computed but never applied, so
        the split always followed the original oocyte order; the
        permutation is now applied before slicing.
        """
        random_arr = np.arange(len(self.oocytes))
        np.random.seed(seed)
        np.random.shuffle(random_arr)
        first_test = math.floor(percent * len(self.oocytes) / 100.)
        oocytes_a = np.array(self.oocytes)[random_arr]
        train_oocytes = list(oocytes_a[:first_test])
        test_oocytes = list(oocytes_a[first_test:])
        train_ds = Dataset(self.name + "train", train_oocytes, self.images_path, self.annotations_path)
        test_ds = Dataset(self.name + "test", test_oocytes, self.images_path, self.annotations_path)
        return train_ds, test_ds

    def tfDataset(self):
        """Whole-dataset tf.data.Dataset of (image, mask) pairs.

        NOTE(review): Item.tensor() requires a *shape* argument, so the
        no-argument ``.tensor()`` calls below raise TypeError as written;
        use tfDataset_fixed_shape() until this is resolved.
        """
        idx = self.oocytes[0]
        image_shape = self.new_item(mask=False, stage=IMMATURE, index=idx).tensor().shape
        mask_shape = self.new_item(mask=True, stage=IMMATURE, index=idx).tensor().shape
        return tf.data.Dataset.from_generator(self.iterate_pairs,
                                              output_signature=(tf.TensorSpec(shape=image_shape, dtype=tf.float32),
                                                                tf.TensorSpec(shape=mask_shape, dtype=tf.float32)))

    def tfDataset_fixed_shape(self, image_shape, mask_shape):
        """tf.data.Dataset of (image, mask) pairs resized to the given shapes."""
        def f():
            for x_item, y_item in self.iterate_pairs(tensor=False):
                yield x_item.tensor(image_shape), y_item.norm_tensor(mask_shape)

        return tf.data.Dataset.from_generator(f,
                                              output_signature=(tf.TensorSpec(shape=image_shape, dtype=tf.float32),
                                                                tf.TensorSpec(shape=mask_shape, dtype=tf.float32)))

    def iterate_pairs(self, tensor=True):
        """Yield (image, mask) per oocyte/stage, as Items or tensors.

        NOTE(review): with tensor=True the no-argument ``.tensor()`` calls
        raise TypeError (Item.tensor requires a shape) — confirm intent.
        """
        for idx in self.oocytes:
            for stage in self.stages:
                x = self.new_item(mask=False, stage=stage, index=idx)
                y = self.new_item(mask=True, stage=stage, index=idx)
                if tensor:
                    x = x.tensor()
                    y = y.tensor()
                yield x, y

    def iterate_items(self):
        """Yield every image and mask Item (image first) per oocyte/stage."""
        for idx in self.oocytes:
            for stage in self.stages:
                yield self.new_item(mask=False, stage=stage, index=idx)
                yield self.new_item(mask=True, stage=stage, index=idx)

    def iterate_oocyte_items(self, tensor=True):
        """Yield every image Item (or tensor — see iterate_pairs NOTE)."""
        for idx in self.oocytes:
            for stage in self.stages:
                x = self.new_item(mask=False, stage=stage, index=idx)
                if tensor:
                    x = x.tensor()
                yield x

    def iterate_mask_items(self):
        """Yield every mask Item, one per oocyte/stage."""
        for idx in self.oocytes:
            for stage in self.stages:
                yield self.new_item(mask=True, stage=stage, index=idx)

    def iterate_oocyte_masks(self):
        """Yield, per oocyte, the list of its mask Items (one per stage)."""
        for idx in self.oocytes:
            masks = []
            for stage in self.stages:
                x = self.new_item(mask=True, stage=stage, index=idx)
                masks.append(x)
            yield masks

    def __repr__(self):
        return "<Dataset: {}>".format(self.name)

    def add_oocyte(self, index):
        """Register a new oocyte index, ignoring duplicates."""
        if index not in self.oocytes:
            self.oocytes.append(index)
src/aix/augmentation.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import tensorflow as tf
3
+
4
class Augment(tf.keras.layers.Layer):
    """Paired augmentation layer for (image, mask) batches.

    Both transforms use the same seed, so the identical random flip is
    applied to the inputs and to the labels of each pair.
    """

    def __init__(self, seed=42):
        super().__init__()
        # Fix: the original also built two RandomRotation layers here, but
        # they were immediately overwritten by the RandomFlip assignments
        # below and therefore never ran; the dead layers are removed.
        self.augment_inputs = tf.keras.layers.RandomFlip(mode="horizontal_and_vertical", seed=seed)
        self.augment_labels = tf.keras.layers.RandomFlip(mode="horizontal_and_vertical", seed=seed)

    def call(self, inputs, labels):
        """Apply the shared-seed flip to both tensors."""
        inputs = self.augment_inputs(inputs)
        labels = self.augment_labels(labels)
        return inputs, labels
19
+
20
+
21
def augment_flip(image, label, axis=0):
    """Apply the same seeded random flip to image and label.

    axis == 0 flips left/right; any other value flips up/down.
    """
    if axis == 0:
        flip = tf.image.random_flip_left_right
    else:
        flip = tf.image.random_flip_up_down

    image = flip(image, seed=42)
    label = flip(label, seed=42)

    return image, label
33
+
34
+
35
def augment_rot(image, label, kappa=1):
    """Rotate both tensors by kappa * 90 degrees counter-clockwise."""
    rotated_image = tf.image.rot90(image, k=kappa)
    rotated_label = tf.image.rot90(label, k=kappa)
    return rotated_image, rotated_label
40
+
41
+
42
def augment(images, labels, seed=42):
    """Apply identical seeded flips plus a 180-degree rotation to both tensors.

    Fix: removed the leftover per-call debug ``print`` statements
    (``type(images)`` / ``tf.shape(images)``) that ran on every batch.
    """
    images = tf.image.random_flip_left_right(images, seed=seed)
    labels = tf.image.random_flip_left_right(labels, seed=seed)
    images = tf.image.random_flip_up_down(images, seed=seed)
    labels = tf.image.random_flip_up_down(labels, seed=seed)
    images = tf.image.rot90(images, k=2)
    labels = tf.image.rot90(labels, k=2)
    # images = tf.image.random_crop(images, size = [1, IMG_SIZE[0], IMG_SIZE[1], 1], seed = seed)
    # labels = tf.image.random_crop(labels, size = [1, IMG_SIZE[0], IMG_SIZE[1], 1], seed = seed)

    return images, labels
56
+
src/aix/consensus.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
4
def mean(input_masks):
    """Pixel-wise average of the masks, truncated back to uint8."""
    accumulator = np.zeros_like(input_masks[0]).astype(np.float64)
    for m in input_masks:
        accumulator = accumulator + m
    averaged = accumulator / len(input_masks)
    return averaged.astype(np.uint8)
8
+
9
+
10
def intersection(input_masks):
    """Consensus keeping 255 only where every mask is 255 (pixel-wise AND)."""
    averaged = mean(input_masks)
    result = np.copy(input_masks[0])
    # The average is 255 exactly when all masks agree on 255.
    result[averaged < 255.] = 0.
    result[averaged == 255.] = 255.
    return result
16
+
17
+
18
def majority(input_masks):
    """Pixel-wise majority vote over the masks.

    Pixels whose truncated average equals exactly 127 keep the first
    mask's value (original tie-breaking behavior).
    """
    averaged = mean(input_masks)
    voted = np.copy(input_masks[0])
    voted[averaged < 127] = 0
    voted[averaged > 127] = 255
    return voted
24
+
25
+
26
+
27
# Names under which each consensus strategy is registered.
MEAN = "mean"
INTERSECTION = "intersection"
MAJORITY = "majority"

# Registry mapping a strategy name to its implementation.
consensus_methods = {
    MEAN: mean,
    INTERSECTION: intersection,
    MAJORITY: majority,
}
35
+
36
+
37
def get_consensus(consensus_name):
    """Return the consensus function registered under *consensus_name*.

    Raises:
        KeyError: when the name is not in ``consensus_methods``.
    """
    return consensus_methods[consensus_name]
src/aix/constants.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+
3
SEED = 42  # global random seed for reproducible experiments

# Original capture resolution, in pixels.
INIT_WIDTH = 1944
INIT_HEIGHT = 2592

# Working resolution and channel count fed to the models.
IMG_WIDTH = 192
IMG_HEIGHT = 240
IMG_CHANNELS = 1

VIEW = False  # NOTE(review): purpose not visible here — presumably toggles visualization; confirm

epsilon = .0001  # small constant — presumably guards divisions by zero; confirm usage

# model parameters

OPTIMIZER = 'adam'
MODEL_LOSS = 'binary_crossentropy'

patience = 500  # presumably early-stopping patience (epochs) — confirm

val_split = 0.1  # fraction of training data held out for validation
batch_size = 16

TEST_SIZE = 20  # NOTE(review): unit unclear (samples? percent?) — confirm against callers

# Maturity stage labels; duplicates of those in aix/__init__.py — keep in sync.
MATURE = "mature"
IMMATURE = "immature"
BASE_DATA_DIR = Path("/data/eurova/cumulus_database/")  # default data root
BASE_MASKS_DIR = BASE_DATA_DIR/"masks"
32
+
src/aix/entropy.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ import skimage.morphology
4
+ import skimage.filters.rank
5
+
6
+
7
+ # Aux functions (originally from suzie.images)
8
+
9
def local_entropy(im, kernel_size=5, normalize=True):
    """Local entropy of *im* over a disk footprint, optionally scaled to uint8 0-255."""
    footprint = skimage.morphology.disk(kernel_size)
    if len(im.shape) > 2:
        # Multi-channel input: replicate the disk across every channel.
        stacked = np.zeros((footprint.shape[0], footprint.shape[1], im.shape[2]))
        stacked[:, :, :] = footprint[:, :, None]
        footprint = stacked
    entropy_img = skimage.filters.rank.entropy(im, footprint)
    if normalize:
        peak = np.max(entropy_img)
        entropy_img = (entropy_img * 255 / peak).astype(np.uint8)
    return entropy_img
21
+
22
+
23
def calc_dim(contour):
    """Bounding box of an OpenCV contour as (x_min, x_max, y_min, y_max).

    Each contour element is a one-point array ``[[x, y]]``.
    """
    xs = [point[0][0] for point in contour]
    ys = [point[0][1] for point in contour]
    return min(xs), max(xs), min(ys), max(ys)
27
+
28
+
29
def calc_size(dim):
    """Area of a (x_min, x_max, y_min, y_max) bounding box."""
    width = dim[1] - dim[0]
    height = dim[3] - dim[2]
    return width * height
31
+
32
+
33
def calc_dist(dim1, dim2):
    """Distance between two bounding boxes — not implemented.

    TODO(review): always returns None; callers must not rely on this
    until an actual distance metric is implemented.
    """
    return None
35
+
36
+
37
+ # -- Main function
38
+
39
def extract_mask(img, filled=True, threshold=135, kernel_size=5):
    """Extract a binary mask of the dominant object via local-entropy thresholding.

    The entropy image is binarized at *threshold* (lowered in steps of 5
    until the mask is non-trivial), then the largest contour that passes
    the artifact filters is filled (or outlined when filled=False).

    NOTE(review): if every contour fails the fill-ratio/aspect-ratio
    filters, ``contour_indices[idx]`` raises IndexError; a zero-width
    bounding box would divide by zero; and the threshold loop can run
    below 0 — confirm inputs make these unreachable.
    """
    entr_img = local_entropy(img, kernel_size=kernel_size)
    _, mask = cv2.threshold(entr_img, threshold, 255, cv2.THRESH_BINARY)
    # Lower the threshold until the binary mask contains both classes.
    while(len(np.unique(mask))==1):
        threshold -= 5
        print("Reducing threshold to ",threshold)
        _, mask = cv2.threshold(entr_img, threshold, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    contours_d = [calc_dim(c) for c in contours]
    contours_sizes = [calc_size(c) for c in contours_d]
    # Candidate contours, largest bounding-box area first.
    contour_indices = np.argsort(contours_sizes)[::-1]

    # remove artifacts: accept the first contour that fills >= 30% of its
    # bounding box and whose aspect ratio is at most 5.
    fratio = 0
    sratio = 5
    idx = -1
    while fratio < 0.3 or sratio > 5:
        idx += 1
        biggest = contour_indices[idx]
        filled_mask = np.zeros(img.shape, dtype=np.uint8)
        filled_mask = cv2.fillPoly(filled_mask, [contours[biggest]], 255)
        fratio = filled_mask.sum() / 255 / contours_sizes[biggest]
        cdim = contours_d[biggest]
        sratio = (cdim[3] - cdim[2]) / (cdim[1] - cdim[0])
        if sratio < 1: sratio = 1 / sratio

    # generating the mask
    filled_mask = np.zeros(img.shape, dtype=np.uint8)
    if filled:
        filled_mask = cv2.fillPoly(filled_mask, [contours[biggest]], 255)
    else:
        filled_mask = cv2.polylines(filled_mask, [contours[biggest]], 30, 255)

    return filled_mask
src/aix/evaluation.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from tqdm import tqdm
2
+
3
+ import aix
4
+ from aix import AIX_DATASETS, AIX_EVALS, MATURE, IMMATURE
5
+ from aix.np_metrics import hard_dice, dice, rel_size_diff
6
+ import cv2
7
+ import pandas as pd
8
+ import json
9
+
10
# Registry of mask-comparison metrics applied during evaluation.
all_measures = {
    "dice": dice,
    "hard_dice": hard_dice,
    "rel_size_diff": rel_size_diff
}

# Column names used in the evaluation CSV files.
OOCYTE = "oocyte"
REFERENCE = "reference"
COMPARED = "compared"
19
+
20
+
21
def load_csv(filename):
    """Read an evaluation CSV from AIX_EVALS, or return None when absent.

    The oocyte column is forced to str so numeric-looking ids survive.
    """
    path = AIX_EVALS / filename
    if not path.is_file():
        print("NOT FOUND!!!")
        print(path)
        return None
    print("Loaded")
    return pd.read_csv(path, dtype={OOCYTE: str})
30
+
31
+
32
def compare_masks(reference: str, alternatives: str, measures=all_measures,
                  in_csv_metrics="eval.csv", out_csv_metrics="out.csv"):
    """Compare every reference mask against the matching masks of other datasets.

    For each oocyte mask present in both the reference dataset and an
    alternative dataset, every measure in *measures* is computed; results
    are merged with any previous CSV and written to AIX_EVALS/out_csv_metrics.

    NOTE(review): *alternatives* is annotated str but iterated like a
    sequence of dataset file names — the annotation looks wrong; confirm.
    NOTE(review): the stage value is stored under the MATURE column name.
    """
    df_old = load_csv(in_csv_metrics)
    print(df_old)
    reference_dataset = aix.Dataset.from_file(AIX_DATASETS / reference)
    alt_datasets = {}
    for alt in alternatives:
        alt_datasets[alt] = aix.Dataset.from_file(AIX_DATASETS / alt)
    l = []
    for item_mask in tqdm(reference_dataset.iterate_mask_items()):
        ref_mask_img = item_mask.norm_image(cv2.IMREAD_GRAYSCALE)
        for alt, alt_d in alt_datasets.items():
            if item_mask.index in alt_d.oocytes:
                alt_mask = aix.Item(alt_d, mask=item_mask.mask, stage=item_mask.stage, index=item_mask.index)
                alt_mask_img = alt_mask.norm_image(cv2.IMREAD_GRAYSCALE)
                d = {OOCYTE: item_mask.index,
                     MATURE: item_mask.stage,
                     REFERENCE: reference_dataset.name,
                     COMPARED: alt_d.name}
                for measure_name, measure in measures.items():
                    m = measure(ref_mask_img, alt_mask_img)
                    d[measure_name] = m
                l.append(d)
            else:
                print("Ignoring oocyte " + str(item_mask.index) + " for dataset " + alt_d.name)
        #break
    df = pd.DataFrame(l)
    if df_old is not None:
        # Merge with previous results; new values win, old values fill gaps.
        index_fields = [OOCYTE, MATURE, REFERENCE, COMPARED]
        df = df_old.merge(df, how='outer', on=index_fields, suffixes=("_x", None))
        print("After merge")
        print(df[COMPARED].unique())
        for measure_name in measures:
            index_fields.append(measure_name)
            # NOTE(review): in-place fillna on a column is deprecated in
            # recent pandas; prefer df[m] = df[m].fillna(...).
            df[measure_name].fillna(df[measure_name + "_x"], inplace=True)
        df = df[index_fields]
    out_csv_file_path = AIX_EVALS / out_csv_metrics
    out_csv_file_path.parent.mkdir(parents=True, exist_ok=True)
    print(out_csv_file_path)
    df.to_csv(out_csv_file_path, index=False)

    return df
74
+
75
+
76
def make_dict(df: pd.DataFrame):
    """Flatten an indexed frame into {"idx0-idx1-...-column": value}.

    Assumes the (multi-)index entries are strings, joined with "-".
    """
    flattened = {}
    column_names = df.columns
    for row_index, row in df.iterrows():
        prefix = "-".join(row_index)
        for position, value in enumerate(row):
            flattened[prefix + "-" + column_names[position]] = value
    return flattened
86
+
87
def summarize_comparison(df: pd.DataFrame, metrics_file="eval.json"):
    """Aggregate per (reference, compared) means and medians and dump as JSON.

    The stage and oocyte columns are dropped before grouping; the summary
    is written to AIX_EVALS/metrics_file.
    """
    print(df)
    grouped = df.drop([MATURE, OOCYTE], axis=1).groupby([REFERENCE, COMPARED])
    print(grouped)
    summary = {"MEANS": make_dict(grouped.mean()),
               "MEDIANS": make_dict(grouped.median())}
    target = AIX_EVALS / metrics_file
    with target.open("w") as f:
        json.dump(summary, f)
117
+
118
+
119
+
120
def evaluate_scale_loss(reference: str, dim, interpolation=cv2.INTER_AREA, extrapolation=cv2.INTER_CUBIC, measures=all_measures):
    """Measure the information lost by downscaling masks to *dim* and scaling back.

    Each mask of the reference dataset is resized down then back up, the
    result is clipped to [0, 1], and every measure is averaged over all
    masks and printed.
    """
    reference_dataset = aix.Dataset.from_file(reference)
    measure_vals = {measure_name: [] for measure_name in measures}
    for item_mask in tqdm(reference_dataset.iterate_mask_items()):
        ref_mask_img = item_mask.uint_norm_image()

        resized = cv2.resize(ref_mask_img, dim, interpolation=interpolation)
        back_area = cv2.resize(resized, (ref_mask_img.shape[1], ref_mask_img.shape[0]), interpolation=extrapolation)
        # Clip interpolation overshoot back into the valid mask range.
        back_area[back_area < 0.] = 0.
        back_area[back_area > 1.] = 1.
        for measure_name, measure in measures.items():
            measure_vals[measure_name].append(measure(ref_mask_img, back_area))

    print("\nReference: " + reference + "\n")

    first_measure_name = next(iter(measures))
    n_masks = len(measure_vals[first_measure_name])
    print(" N.Images : %4d" % n_masks)
    for measure_name in measures:
        print(" " + measure_name + ": %4.2f" % (sum(measure_vals[measure_name]) / n_masks))
src/aix/load_dataset.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from aix import Dataset
2
+ import aix.constants as C
3
+ import os
4
+ import numpy as np
5
+ import cv2
6
+
7
def load_image(path):
    """Read *path* as grayscale, scale to [0, 1], and add a trailing channel axis."""
    grayscale = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    with_channel = np.reshape(grayscale, (grayscale.shape[0], grayscale.shape[1], -1))
    return with_channel / 255
13
+
14
def _load_resized_batch(paths, rows, cols):
    """Load every image in *paths* resized to (cols, rows) as a (N, rows, cols, 1) float64 array."""
    batch = np.ndarray((len(paths), rows, cols, 1), dtype=np.float64)
    for i, path in enumerate(paths):
        img = load_image(path)
        img = cv2.resize(img, (cols, rows), interpolation=cv2.INTER_AREA)
        batch[i] = np.expand_dims(img, axis=2)
    return batch


def temp_numpy_masks(training_fold):
    """Materialize one CV fold as numpy arrays.

    *training_fold* is [x_train_paths, y_train_paths, x_test_paths,
    y_test_paths]; returns (X_train, y_train, X_test, y_test) with shape
    (N, IMG_WIDTH, IMG_HEIGHT, 1).

    Refactor: the original repeated the same load-resize loop four times;
    it is now factored into _load_resized_batch (same behavior).
    """
    image_rows = C.IMG_WIDTH
    image_cols = C.IMG_HEIGHT

    X_train = _load_resized_batch(training_fold[0], image_rows, image_cols)
    y_train = _load_resized_batch(training_fold[1], image_rows, image_cols)
    X_test = _load_resized_batch(training_fold[2], image_rows, image_cols)
    y_test = _load_resized_batch(training_fold[3], image_rows, image_cols)

    return X_train, y_train, X_test, y_test
53
+
54
def load_training_batch(name = 'average_model', oocytes_list = None, images_path = '', annotations_path = ''):
    """Build every cross-validation fold of a dataset as numpy arrays.

    Fix: ``oocytes_list`` previously defaulted to a mutable list shared
    across calls; it now defaults to None and a fresh list is created
    per call (same observable behavior for callers).
    """
    if oocytes_list is None:
        oocytes_list = []

    dataset = Dataset(name, oocytes_list, images_path, annotations_path)

    gen = dataset.train_test_iterator()

    j = 0
    folds = []
    for x_train, x_test, y_train, y_test in gen:
        j += 1
        print(j)
        folds.append(temp_numpy_masks([x_train, y_train, x_test, y_test]))

    # save temporary numpies??

    # DO MODEL stuff in here

    return folds
src/aix/losses.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+
4
+ import tensorflow as tf
5
+ from tensorflow.keras import backend as K
6
+ from keras.layers import *
7
+ from keras.losses import binary_crossentropy
8
+ import keras
9
+
10
+ from aix import logger
11
+ import aix.constants as C
12
+
13
# Numerical-stability constants shared by the losses below.
epsilon = 1e-5  # NOTE(review): not referenced by the visible code in this module — confirm
smooth = 1      # smoothing term used by tversky()
15
+
16
+
17
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between flattened masks, epsilon-smoothed."""
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    denominator = K.sum(flat_true) + K.sum(flat_pred) + K.epsilon()
    return (2. * overlap + K.epsilon()) / denominator
24
+
25
# Focal-loss hyper-parameters (used by focal_loss / focal_loss_with_logits).
alpha = 0.25  # weight applied to the positive-target term
gamma = 2     # focusing exponent
27
+
28
def focal_loss_with_logits(logits, targets, alpha, gamma, y_pred):
    """Per-element focal binary cross-entropy, evaluated in a numerically stable logit form."""
    positive_weight = alpha * (1 - y_pred) ** gamma * targets
    negative_weight = (1 - alpha) * y_pred ** gamma * (1 - targets)
    # Stable form of log(1 + exp(-logits)) valid for both logit signs.
    stable_bce = tf.math.log1p(tf.exp(-tf.abs(logits))) + tf.nn.relu(-logits)
    return stable_bce * (positive_weight + negative_weight) + logits * negative_weight
34
+
35
def focal_loss(y_true, y_pred):
    """Mean focal loss; clips predictions away from 0/1 before taking logits."""
    eps = tf.keras.backend.epsilon()
    clipped = tf.clip_by_value(y_pred, eps, 1 - eps)
    logits = tf.math.log(clipped / (1 - clipped))
    per_element = focal_loss_with_logits(logits=logits, targets=y_true,
                                         alpha=alpha, gamma=gamma, y_pred=clipped)
    return tf.reduce_mean(per_element)
43
+
44
@keras.saving.register_keras_serializable(package="aix.losses")
def dsc(y_true, y_pred):
    """Smoothed Dice score (smooth=1), registered for Keras model (de)serialization."""
    smoothing = 1.
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    return (2. * overlap + smoothing) / (K.sum(flat_true) + K.sum(flat_pred) + smoothing)
54
+
55
+
56
@keras.saving.register_keras_serializable(package="aix.losses")
def dice_loss(y_true, y_pred):
    """One minus the smoothed Dice score, registered for Keras serialization."""
    return 1 - dsc(y_true, y_pred)
62
+
63
+
64
def bce_dice_loss(y_true, y_pred):
    """Sum of binary cross-entropy and Dice loss."""
    bce = binary_crossentropy(y_true, y_pred)
    return bce + dice_loss(y_true, y_pred)
69
+
70
def confusion(y_true, y_pred):
    """Return (precision, recall), each smoothed by +1 in numerator and denominator."""
    smoothing = 1
    pred_pos = K.clip(y_pred, 0, 1)
    pred_neg = 1 - pred_pos
    actual_pos = K.clip(y_true, 0, 1)
    actual_neg = 1 - actual_pos
    true_pos = K.sum(actual_pos * pred_pos)
    false_pos = K.sum(actual_neg * pred_pos)
    false_neg = K.sum(actual_pos * pred_neg)
    precision = (true_pos + smoothing) / (true_pos + false_pos + smoothing)
    recall = (true_pos + smoothing) / (true_pos + false_neg + smoothing)
    return precision, recall
84
+
85
def tp(y_true, y_pred):
    """Smoothed true-positive rate computed from rounded masks."""
    smoothing = 1
    rounded_pred = K.round(K.clip(y_pred, 0, 1))
    rounded_true = K.round(K.clip(y_true, 0, 1))
    return (K.sum(rounded_true * rounded_pred) + smoothing) / (K.sum(rounded_true) + smoothing)
93
+
94
def tn(y_true, y_pred):
    """Smoothed true-negative rate computed from rounded masks."""
    smoothing = 1
    negated_pred = 1 - K.round(K.clip(y_pred, 0, 1))
    negated_true = 1 - K.round(K.clip(y_true, 0, 1))
    return (K.sum(negated_true * negated_pred) + smoothing) / (K.sum(negated_true) + smoothing)
103
+
104
def tversky(y_true, y_pred):
    """Tversky index with alpha=0.7 (false negatives weighted more than false positives)."""
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    missed = K.sum(flat_true * (1 - flat_pred))
    spurious = K.sum((1 - flat_true) * flat_pred)
    weight = 0.7
    return (overlap + smooth) / (overlap + weight * missed + (1 - weight) * spurious + smooth)
114
+
115
def tversky_loss(y_true, y_pred):
    """Loss form of the Tversky index: 1 - tversky."""
    index = tversky(y_true, y_pred)
    return 1 - index
118
+
119
def focal_tversky(y_true, y_pred):
    """Focal Tversky loss: (1 - tversky)^gamma with gamma=0.75."""
    tversky_index = tversky(y_true, y_pred)
    focusing = 0.75
    return K.pow(1 - tversky_index, focusing)
126
def sensitivity(y_true, y_pred):
    """Recall: fraction of actual positives recovered (epsilon-guarded)."""
    hits = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return hits / (actual_positives + K.epsilon())
130
+
131
def specificity(y_true, y_pred):
    """Fraction of actual negatives correctly rejected (epsilon-guarded)."""
    correct_rejections = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    actual_negatives = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return correct_rejections / (actual_negatives + K.epsilon())
src/aix/models.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #from aix.constants import *
2
+
3
+ import tensorflow as tf
4
+
5
+
6
def initialize_model(img_width, img_height, img_channels):
    """Build a U-Net segmentation model and return it uncompiled.

    Architecture: a 5-level contracting path (16 -> 256 filters, each level
    two 3x3 ReLU convs + dropout, followed by 2x2 max-pooling), then a
    symmetric expansive path of Conv2DTranspose upsampling with skip
    connections to the matching contraction level, ending in a 1-channel
    1x1 sigmoid conv — i.e. a binary mask the same spatial size as the input.

    All layers carry explicit names (block{i}_...) so weights can be
    inspected/transferred by name.

    :param img_width: input image width in pixels.
    :param img_height: input image height in pixels.
    :param img_channels: number of input channels.
    :return: an uncompiled tf.keras.Model; callers choose optimizer/loss.
    """
    inputs = tf.keras.layers.Input((img_width, img_height, img_channels), name='input')

    # Contraction path: dropout rate grows with depth (0.1 -> 0.3).
    c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block1_conv2d_1')(inputs)
    c1 = tf.keras.layers.Dropout(0.1, name='block1_dropout')(c1)
    c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block1_conv2d_2')(c1)
    p1 = tf.keras.layers.MaxPooling2D((2, 2), name='block1_max_pooling')(c1)

    c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block2_conv2d_1')(p1)
    c2 = tf.keras.layers.Dropout(0.1, name='block2_dropout')(c2)
    c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block2_conv2d_2')(c2)
    p2 = tf.keras.layers.MaxPooling2D((2, 2), name='block2_max_pooling')(c2)

    c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block3_conv2d_1')(p2)
    c3 = tf.keras.layers.Dropout(0.2, name='block3_dropout')(c3)
    c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block3_conv2d_2')(c3)
    p3 = tf.keras.layers.MaxPooling2D((2, 2), name='block3_max_pooling')(c3)

    c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block4_conv2d_1')(p3)
    c4 = tf.keras.layers.Dropout(0.2, name='block4_dropout')(c4)
    c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block4_conv2d_2')(c4)
    p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), name='block4_max_pooling')(c4)

    # Bottleneck.
    c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block5_conv2d_1')(p4)
    c5 = tf.keras.layers.Dropout(0.3, name='block5_dropout')(c5)
    c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block5_conv2d_2')(c5)

    # Expansive path: upsample and concatenate with the mirrored contraction output.
    u6 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same', name='block6_conv2d_transpose')(c5)
    u6 = tf.keras.layers.concatenate([u6, c4], name='block6_concatenate')
    c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block6_conv2d_1')(u6)
    c6 = tf.keras.layers.Dropout(0.2, name='block6_dropout')(c6)
    c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block6_conv2d_2')(c6)

    u7 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same', name='block7_conv2d_transpose')(c6)
    u7 = tf.keras.layers.concatenate([u7, c3], name='block7_concatenate')
    c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block7_conv2d_1')(u7)
    c7 = tf.keras.layers.Dropout(0.2, name='block7_dropout')(c7)
    c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block7_conv2d_2')(c7)

    u8 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same', name='block8_conv2d_transpose')(c7)
    u8 = tf.keras.layers.concatenate([u8, c2], name='block8_concatenate')
    c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block8_conv2d_1')(u8)
    c8 = tf.keras.layers.Dropout(0.1, name='block8_dropout')(c8)
    c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block8_conv2d_2')(c8)

    u9 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same', name='block9_conv2d_transpose')(c8)
    u9 = tf.keras.layers.concatenate([u9, c1], axis=3, name='block9_concatenate')
    c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block9_conv2d_1')(u9)
    c9 = tf.keras.layers.Dropout(0.1, name='block9_dropout')(c9)
    c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same', name='block9_conv2d_2')(c9)

    # Per-pixel sigmoid -> binary mask probabilities.
    outputs = tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid', name='output')(c9)

    model = tf.keras.Model(inputs = [inputs], outputs = [outputs])
    # Intentionally not compiled here; the caller picks optimizer/loss/metrics.

    return model
69
+
70
+
71
def initialize_model_v2(img_width, img_height, img_channels,
                        optimizer = 'adam', model_loss = 'binary_crossentropy', data_augm = False):
    """Build a variant of the U-Net in ``initialize_model`` and return it uncompiled.

    Differences from v1: per-level dropout is removed except for a single
    Dropout(0.5) after the bottleneck, layers are unnamed, and every filter
    count is scaled by the local multiplier ``n`` (currently 1).

    NOTE(review): ``optimizer``, ``model_loss`` and ``data_augm`` are not
    used anywhere in this body (compilation is commented out below);
    presumably kept for signature compatibility — confirm before removing.

    :param img_width: input image width in pixels.
    :param img_height: input image height in pixels.
    :param img_channels: number of input channels.
    :return: an uncompiled tf.keras.Model producing a 1-channel sigmoid mask.
    """
    n = 1  # global filter-width multiplier (16*n ... 256*n)
    inputs = tf.keras.layers.Input((img_width, img_height, img_channels))
    # Contraction path
    c1 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(inputs)
    c1 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
    p1 = tf.keras.layers.MaxPooling2D((2, 2))(c1)

    c2 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
    c2 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
    p2 = tf.keras.layers.MaxPooling2D((2, 2))(c2)

    c3 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
    c3 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
    p3 = tf.keras.layers.MaxPooling2D((2, 2))(c3)

    c4 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
    c4 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
    p4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck — the only dropout in this variant.
    c5 = tf.keras.layers.Conv2D(256 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
    c5 = tf.keras.layers.Conv2D(256 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
    c5 = tf.keras.layers.Dropout(0.5)(c5)

    # Expansive path
    u6 = tf.keras.layers.Conv2DTranspose(128 * n, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = tf.keras.layers.concatenate([u6, c4])
    c6 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
    c6 = tf.keras.layers.Conv2D(128 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)

    u7 = tf.keras.layers.Conv2DTranspose(64 * n, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = tf.keras.layers.concatenate([u7, c3])
    c7 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
    c7 = tf.keras.layers.Conv2D(64 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)

    u8 = tf.keras.layers.Conv2DTranspose(32 * n, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = tf.keras.layers.concatenate([u8, c2])
    c8 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
    c8 = tf.keras.layers.Conv2D(32 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)

    u9 = tf.keras.layers.Conv2DTranspose(16 * n, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = tf.keras.layers.concatenate([u9, c1], axis=3)
    c9 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
    c9 = tf.keras.layers.Conv2D(16 * n, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)

    outputs = tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)

    model = tf.keras.Model(inputs = [inputs], outputs = [outputs])
    # Intentionally not compiled here; the caller picks optimizer/loss/metrics.

    return model
134
+
135
+ # From pix2pix (https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/pix2pix/pix2pix.py)
136
+ #
137
def pix2pix_upsample(filters, size, norm_type='batchnorm', apply_dropout=False):
    """Upsamples an input.

    Conv2DTranspose => Batchnorm => Dropout => Relu

    Args:
      filters: number of filters
      size: filter size
      norm_type: Normalization type; either 'batchnorm' or 'instancenorm'.
      apply_dropout: If True, adds the dropout layer
    Returns:
      Upsample Sequential Model
    Raises:
      NotImplementedError: for 'instancenorm' — no InstanceNormalization
        layer is defined in this module (the original pix2pix code imports
        one; here the name was undefined and raised NameError).
      ValueError: for any other norm_type, instead of silently skipping
        normalization.
    """
    initializer = tf.random_normal_initializer(0., 0.02)

    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                        padding='same',
                                        kernel_initializer=initializer,
                                        use_bias=False))

    norm = norm_type.lower()
    if norm == 'batchnorm':
        result.add(tf.keras.layers.BatchNormalization())
    elif norm == 'instancenorm':
        # BUG FIX: the original referenced the undefined name
        # InstanceNormalization (NameError at call time); fail loudly with
        # an actionable message until the layer is wired in.
        raise NotImplementedError(
            "norm_type='instancenorm' requires an InstanceNormalization layer "
            "(e.g. tensorflow_addons.layers.InstanceNormalization)")
    else:
        raise ValueError("norm_type must be 'batchnorm' or 'instancenorm', got %r" % (norm_type,))

    if apply_dropout:
        result.add(tf.keras.layers.Dropout(0.5))

    result.add(tf.keras.layers.ReLU())

    return result
169
+
170
+ # Adapted From: https://www.tensorflow.org/tutorials/images/segmentation
171
+
172
def unet_model(output_channels:int, input_shape=(128, 128, 3),
               optimizer = 'adam', model_loss = 'binary_crossentropy'):
    """Build and compile a U-Net with a frozen MobileNetV2 encoder.

    Adapted from https://www.tensorflow.org/tutorials/images/segmentation:
    five intermediate MobileNetV2 activations form the downsampling stack,
    pix2pix_upsample blocks with skip connections form the upsampling
    stack, and a final Conv2DTranspose restores the input resolution.

    BUG FIXES:
      * ``dice_coef`` was referenced in ``metrics`` but never imported in
        this module, so compiling the model raised NameError; it is now
        imported locally (function scope, to avoid import cycles).
      * the mutable list default for ``input_shape`` is now a tuple
        (callers may still pass lists).

    :param output_channels: number of channels in the predicted mask (the
        output is raw logits; pair with a from-logits loss or sigmoid).
    :param input_shape: (H, W, C) of the inputs; MobileNetV2 expects C=3.
    :param optimizer: optimizer passed to model.compile.
    :param model_loss: loss passed to model.compile.
    :return: a compiled tf.keras.Model.
    """
    from aix.utils import dice_coef

    base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape, include_top=False)

    # Use the activations of these layers as encoder features.
    layer_names = [
        'block_1_expand_relu',   # 64x64
        'block_3_expand_relu',   # 32x32
        'block_6_expand_relu',   # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',      # 4x4
    ]
    base_model_outputs = [base_model.get_layer(name).output for name in layer_names]

    # Create the feature extraction model and freeze it.
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=base_model_outputs)
    down_stack.trainable = False

    up_stack = [
        pix2pix_upsample(512, 3),  # 4x4 -> 8x8
        pix2pix_upsample(256, 3),  # 8x8 -> 16x16
        pix2pix_upsample(128, 3),  # 16x16 -> 32x32
        pix2pix_upsample(64, 3),   # 32x32 -> 64x64
    ]

    inputs = tf.keras.layers.Input(shape=input_shape)

    # Downsampling through the model; the deepest feature seeds the decoder.
    skips = down_stack(inputs)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections.
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # Last layer: 64x64 -> 128x128, no activation (logits).
    last = tf.keras.layers.Conv2DTranspose(
        filters=output_channels, kernel_size=3, strides=2,
        padding='same')

    x = last(x)

    model = tf.keras.Model(inputs=inputs, outputs=x)

    model.compile(optimizer=optimizer,
                  loss=model_loss,
                  metrics=['acc', dice_coef]
                  )

    return model
src/aix/np_metrics.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Metrics between masks in numpy
2
+ import numpy as np
3
+
4
+
5
def dice(img1: np.ndarray, img2: np.ndarray, epsilon=0.00001):
    """Soft Dice coefficient between two masks whose values lie in [0, 1].

    ``epsilon`` smooths both numerator and denominator so two empty masks
    score 1 instead of 0/0.
    """
    for mask in (img1, img2):
        assert np.min(mask) >= 0.
        assert np.max(mask) <= 1.
    overlap = np.sum(img1 * img2)
    total = np.sum(img1) + np.sum(img2)
    return (overlap * 2 + epsilon) / (total + epsilon)
11
+
12
+
13
def harden(img: np.ndarray):
    """Binarize a mask: pixels strictly above 0.5 become 1, the rest 0 (uint8)."""
    return (img > 0.5).astype(np.uint8)
17
+
18
def restrict(m: np.ndarray):
    """Return a copy of *m* with every value clamped into [0, 1]; the
    input array is left untouched."""
    return np.clip(m, 0., 1.)
23
+
24
def check_mask(m):
    """Validate that a mask's values lie in [0, 1]: print a warning for
    out-of-range pixels, then fail via assertion."""
    lo, hi = np.min(m), np.max(m)
    if lo < 0.:
        print("WARNING: Mask contains pixels below 0, constraining it to be over 0")
    if hi > 1.:
        print("WARNING: Mask contains pixels over 1, constraining them to be below 1")
    assert lo >= 0.
    assert hi <= 1.
31
+
32
def hard_dice(img1: np.ndarray, img2: np.ndarray, epsilon=0.00001):
    """Dice coefficient computed after validating both masks and
    thresholding them to hard {0, 1} values at 0.5."""
    for mask in (img1, img2):
        check_mask(mask)
    return dice(harden(img1), harden(img2), epsilon)
36
+
37
+
38
def rel_size_diff(img_r: np.ndarray, img: np.ndarray, epsilon=0.00001):
    """Relative absolute difference in mask area w.r.t. the reference:
    |sum(img_r) - sum(img)| / (sum(img_r) + epsilon).

    BUG FIX: ``epsilon`` was accepted but never used, so an empty
    reference mask (sum 0) divided by zero; the denominator is now
    smoothed with ``epsilon``.

    :param img_r: reference mask.
    :param img: candidate mask, values in [0, 1] (asserted).
    :param epsilon: denominator smoothing term.
    """
    assert np.min(img) >= 0.
    assert np.max(img) <= 1.
    s_r = np.sum(img_r)
    s = np.sum(img)
    return np.abs(s_r - s) / (s_r + epsilon)
src/aix/utils.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import gzip
3
+ from pathlib import Path
4
+
5
+ import cv2
6
+
7
+ import skimage.morphology
8
+ import skimage.filters.rank
9
+ import skimage.util
10
+
11
+ from tensorflow.keras import backend as K
12
+ import tensorflow as tf
13
+ import keras
14
+
15
+ from aix import logger
16
+ import aix.constants as C
17
+
18
+
19
+
20
def dice_coef(y_true, y_pred, smooth=.0001):
    """Soft Dice coefficient per sample over the (H, W, C) axes, averaged
    across the batch. ``smooth`` keeps the ratio defined for empty masks."""
    reduce_axes = [1, 2, 3]
    overlap = K.sum(y_true * y_pred, axis=reduce_axes)
    totals = K.sum(y_true, axis=reduce_axes) + K.sum(y_pred, axis=reduce_axes)
    return K.mean((2. * overlap + smooth) / (totals + smooth), axis=0)
27
+
28
def harden(y, threshold=0.5):
    """Threshold a tensor to hard values: strictly above *threshold* -> 1., else 0."""
    return tf.where(y > threshold, 1., 0.)
31
+
32
@keras.saving.register_keras_serializable(package="aix.utils")
def hardened_dice_coef(y_true, y_pred, smooth=.0001):
    """Dice coefficient computed on 0.5-thresholded (hard) masks.

    Registered with Keras serialization so models compiled with this
    metric can be saved and reloaded.

    BUG FIX: ``smooth`` was accepted but silently dropped — it is now
    forwarded to ``dice_coef`` (identical results for callers that relied
    on the default).
    """
    return dice_coef(harden(y_true), harden(y_pred), smooth)
37
+
38
def dice_coef_loss(y_true, y_pred):
    """Negative Dice coefficient, usable as a minimization objective."""
    return -dice_coef(y_true, y_pred)
43
+
44
def local_entropy(im, kernel_size=5, normalize=True):
    """Per-pixel local entropy of *im* over a disk-shaped neighbourhood.

    When *normalize* is true the result is rescaled so its maximum maps
    to 255 and cast to uint8.
    """
    footprint = skimage.morphology.disk(kernel_size)
    entropy_img = skimage.filters.rank.entropy(skimage.util.img_as_ubyte(im), footprint)
    if not normalize:
        return entropy_img
    peak = np.max(entropy_img)
    return (entropy_img * 255 / peak).astype(np.uint8)
51
+
52
def calc_dim(contour):
    """Bounding box (x_min, x_max, y_min, y_max) of an OpenCV-style
    contour, i.e. a sequence of [[x, y]] points."""
    xs = [pt[0][0] for pt in contour]
    ys = [pt[0][1] for pt in contour]
    return (min(xs), max(xs), min(ys), max(ys))
56
+
57
def calc_size(dim):
    """Area of a (x_min, x_max, y_min, y_max) bounding box."""
    width = dim[1] - dim[0]
    height = dim[3] - dim[2]
    return width * height
59
+
60
def calc_dist(dim1, dim2):
    # Unimplemented stub: apparently intended to measure a distance between
    # two bounding boxes, but no metric was ever chosen — it always returns
    # None. NOTE(review): confirm the intended semantics before using this.
    return None
62
+
63
def extract_roi(img, threshold=135, kernel_size=5, min_fratio=.3, max_sratio=5, filled=True, border=.01):
    """Locate the region of interest in a grayscale image via local entropy.

    Pipeline: compute a local-entropy map, binarize it at *threshold*,
    extract external contours, then scan contours from largest bounding
    box downwards until one passes two artifact filters:
      * fratio  — filled-contour area / bounding-box area must exceed
                  *min_fratio* (rejects thin, sparse shapes);
      * sratio  — bounding-box aspect ratio (always >= 1) must stay below
                  *max_sratio* (rejects extremely elongated shapes).
    The accepted box is padded by *border* (fraction of image size),
    clipped to the image, and drawn into a uint8 mask.

    NOTE(review): if every contour fails the filters, ``contour_indices[idx]``
    eventually raises IndexError — confirm whether callers guarantee a
    valid ROI exists.

    :param img: grayscale image as a 2-D array.
    :param threshold: binarization threshold on the entropy map (0-255).
    :param kernel_size: disk radius for the entropy neighbourhood.
    :param filled: if True the ROI rectangle is filled (255); otherwise
        only a 2px outline is drawn.
    :return: (mask, origin, to) — the uint8 mask plus the (x, y) corners
        of the padded ROI rectangle.
    """
    entr_img = local_entropy(img, kernel_size=kernel_size)
    _, mask = cv2.threshold(entr_img, threshold, 255, cv2.THRESH_BINARY)

    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    # Bounding boxes and their areas, scanned from largest to smallest.
    contours_d = [calc_dim(c) for c in contours]
    contours_sizes = [calc_size(c) for c in contours_d]
    contour_indices = np.argsort(contours_sizes)[::-1]

    # remove artifacts: seed the loop with failing values so at least one
    # candidate is always examined
    fratio = min_fratio
    sratio = max_sratio
    idx = -1
    while fratio<=min_fratio or sratio>=max_sratio:
        idx += 1
        biggest = contour_indices[idx]
        filled_mask = np.zeros(img.shape, dtype=np.uint8)
        filled_mask = cv2.fillPoly(filled_mask, [contours[biggest]], 255)
        # filled area relative to the bounding-box area
        fratio = filled_mask.sum()/255/contours_sizes[biggest]
        cdim = contours_d[biggest]
        # aspect ratio, normalized to be >= 1
        sratio = (cdim[3]-cdim[2])/(cdim[1]-cdim[0])
        if sratio<1: sratio = 1 / sratio

    # generating the mask: a padded rectangle around the accepted contour
    filled_mask = np.zeros(img.shape, dtype=np.uint8)

    # border padding in pixels: (rows, cols); note origin/to are (x, y)
    extra = ( int(img.shape[0] * border) , int(img.shape[1] * border) )
    origin = (max(0, cdim[0]-extra[1]), max(0, cdim[2]-extra[0]))
    to = (min(img.shape[1]-1 , cdim[1]+extra[1]), min(img.shape[0]-1 , cdim[3]+extra[0]))

    if filled:
        filled_mask = cv2.rectangle(filled_mask, origin, to, 255, -1)
    else:
        filled_mask = cv2.rectangle(filled_mask, origin, to, 255, 2)

    return filled_mask, origin, to
102
+
103
def preprocessor(input_img, img_rows, img_cols):
    """
    Resize input images to constants sizes
    :param input_img: numpy array of images, channels-last (N, H, W, C)
    :param img_rows: target number of rows (height) after resizing
    :param img_cols: target number of columns (width) after resizing
    :return: numpy array of preprocessed images, channels-last, dtype uint8
    """
    logger.debug("Preprocessing...")

    # channels-last -> channels-first: (N, H, W, C) becomes (N, C, H, W)
    input_img = np.swapaxes(input_img, 2, 3)
    input_img = np.swapaxes(input_img, 1, 2)

    logger.debug("Input: " + str(input_img.shape))

    output_img = np.ndarray((input_img.shape[0], input_img.shape[1], img_rows, img_cols), dtype = np.uint8)
    # NOTE(review): only channel 0 is resized below, so inputs are
    # presumably single-channel; any extra channels would stay
    # uninitialized (np.ndarray does not zero memory) — confirm.
    for i in range(input_img.shape[0]):
        output_img[i, 0] = cv2.resize(input_img[i, 0], (img_cols, img_rows), interpolation = cv2.INTER_AREA)
    # channels-first -> channels-last again
    output_img = np.swapaxes(output_img, 1, 2)
    output_img = np.swapaxes(output_img, 2, 3)

    logger.debug("Output: " + str(output_img.shape))

    return output_img
129
+
130
def load_train_data(imgs_path, masks_path):
    """Load gzipped training images and masks, resize them to the size
    configured in aix.constants, and normalize the images to zero mean
    and unit standard deviation (after scaling to [0, 1]).

    :param imgs_path: path to a gzipped .npy file of images.
    :param masks_path: path to a gzipped .npy file of masks.
    :return: (X_train, y_train) float32 arrays.
    """
    logger.debug("\nLoading train data ...\n")

    images = np.load(gzip.open(imgs_path))
    masks = np.load(gzip.open(masks_path))

    logger.debug(images.shape)
    logger.debug(masks.shape)

    images = preprocessor(images, C.IMG_WIDTH, C.IMG_HEIGHT)
    masks = preprocessor(masks, C.IMG_WIDTH, C.IMG_HEIGHT)

    # scale to [0, 1], then center/scale with the dataset's own statistics
    images = images.astype('float32') / 255
    images = (images - np.mean(images)) / np.std(images)

    return images, masks.astype('float32')
158
+
159
def process_data(X, y):
    """Resize an image/mask array pair to the size configured in
    aix.constants and cast both to float32 (no normalization)."""
    logger.debug("\nLoading train data ...\n")

    logger.debug(X.shape)
    logger.debug(y.shape)

    resized_X = preprocessor(X, C.IMG_WIDTH, C.IMG_HEIGHT).astype('float32')
    resized_y = preprocessor(y, C.IMG_WIDTH, C.IMG_HEIGHT).astype('float32')

    return resized_X, resized_y
173
+
174
def load_skin_train_data(imgs_path, masks_path, img_width, img_height):
    """Load gzipped skin-lesion training images and masks, resize to the
    requested size, normalize images to zero mean / unit std, and scale
    masks from {0, 255} to {0, 1}.

    BUG FIX: ``img_width`` and ``img_height`` were accepted but ignored —
    the resize always used the global C.IMG_WIDTH / C.IMG_HEIGHT. The
    arguments are now honored (unchanged behavior for callers passing the
    configured size).

    :param imgs_path: path to a gzipped .npy file of images.
    :param masks_path: path to a gzipped .npy file of masks.
    :param img_width: target image width after resizing.
    :param img_height: target image height after resizing.
    :return: (X_train, y_train) float32 arrays.
    """
    logger.debug("\nLoading train data ...\n")

    X_train = np.load(gzip.open(imgs_path))
    y_train = np.load(gzip.open(masks_path))

    logger.debug(X_train.shape)
    logger.debug(y_train.shape)

    X_train = preprocessor(X_train, img_width, img_height)
    y_train = preprocessor(y_train, img_width, img_height)

    X_train = X_train.astype('float32')

    mean = np.mean(X_train)  # mean for data centering
    std = np.std(X_train)    # std for data normalization

    X_train -= mean
    X_train /= std

    y_train = y_train.astype('float32')
    y_train /= 255.  # masks arrive as 0/255; scale to 0/1

    return X_train, y_train