code
stringlengths
17
6.64M
def shape2d(a):
    """Normalize a scalar or pair into a 2-element shape list.

    :param a: an int, or a tuple/list of length 2
    :return: a list of length 2 (``[a, a]`` for an int input)
    :raises RuntimeError: for any other input
    """
    # isinstance is the idiomatic type check and also accepts int subclasses,
    # which is a backward-compatible widening of ``type(a) == int``.
    if isinstance(a, int):
        return [a, a]
    if isinstance(a, (list, tuple)):
        assert len(a) == 2, len(a)
        return list(a)
    raise RuntimeError('Illegal shape: {}'.format(a))
class StoppableThread(threading.Thread):
    """A thread that has a 'stop' event."""

    def __init__(self):
        super(StoppableThread, self).__init__()
        self._stop_evt = threading.Event()

    def stop(self):
        """Set the stop event."""
        self._stop_evt.set()

    def stopped(self):
        """:return: whether the stop event has been set"""
        # Event.is_set() is the modern spelling; isSet() is a deprecated
        # camelCase alias (removed in Python 3.12).
        return self._stop_evt.is_set()

    def queue_put_stoppable(self, q, obj):
        """Put obj to queue, but give up when the thread is stopped.

        Retries with a 5s timeout so a stop request is noticed even when
        the queue stays full.
        """
        while not self.stopped():
            try:
                q.put(obj, timeout=5)
                break
            except queue.Full:
                pass

    def queue_get_stoppable(self, q):
        """Take obj from queue, but give up when the thread is stopped.

        :return: the dequeued object, or None if stopped before anything
            arrived.
        """
        while not self.stopped():
            try:
                return q.get(timeout=5)
            except queue.Empty:
                pass
class LoopThread(StoppableThread):
    """A pausable daemon thread that repeatedly runs a function in a loop."""

    def __init__(self, func, pausable=True):
        """
        :param func: the function to run on every loop iteration
        :param pausable: whether :meth:`pause`/:meth:`resume` are supported
        """
        super(LoopThread, self).__init__()
        self._func = func
        self._pausable = pausable
        if pausable:
            self._lock = threading.Lock()
        self.daemon = True

    def run(self):
        while not self.stopped():
            if self._pausable:
                # acquire-then-release: blocks here while pause() holds the lock
                self._lock.acquire()
                self._lock.release()
            self._func()

    def pause(self):
        """Hold the lock so the loop blocks before its next iteration."""
        assert self._pausable
        self._lock.acquire()

    def resume(self):
        """Release the lock so the loop continues."""
        assert self._pausable
        self._lock.release()
class DIE(object):
    """Sentinel placed on a queue to signal its end (a placeholder class)."""
    pass
def ensure_proc_terminate(proc):
    """Register an atexit hook that terminates the given process(es).

    :param proc: a multiprocessing.Process, or a list of them
    """
    if isinstance(proc, list):
        for each in proc:
            ensure_proc_terminate(each)
        return

    def stop_proc_by_weak_ref(ref):
        # hold only a weakref so this hook does not keep the process alive
        p = ref()
        if p is None:
            return
        if not p.is_alive():
            return
        p.terminate()
        p.join()

    assert isinstance(proc, multiprocessing.Process)
    atexit.register(stop_proc_by_weak_ref, weakref.ref(proc))
@contextmanager
def mask_sigint():
    """Context manager that ignores SIGINT within its block.

    The previous SIGINT handler is restored on exit, even when the body
    raises — the original version leaked the IGN handler on exception.
    """
    sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, sigint_handler)
def start_proc_mask_signal(proc):
    """Start process(es) while SIGINT is ignored, so children inherit the
    ignore-handler and Ctrl-C only interrupts the parent.

    :param proc: a process, or a list of processes
    """
    procs = proc if isinstance(proc, list) else [proc]
    with mask_sigint():
        for p in procs:
            p.start()
def subproc_call(cmd, timeout=None):
    """Execute a shell command and capture its combined stdout/stderr.

    :param cmd: the command line, run through the shell
    :param timeout: seconds before giving up, or None for no limit
    :return: the output bytes on success, or None on timeout/failure
    """
    try:
        output = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout)
        return output
    except subprocess.TimeoutExpired as e:
        logger.warn('Command timeout!')
        logger.warn(e.output)
    except subprocess.CalledProcessError as e:
        # fixed typo in the message: was 'Commnad failed'
        logger.warn('Command failed: {}'.format(e.returncode))
        logger.warn(e.output)
class OrderedContainer(object):
    """
    Like a priority queue, but will always wait for item with index (x+1)
    before producing (x+2).
    """

    def __init__(self, start=0):
        """:param start: the first rank to wait for"""
        self.ranks = []
        self.data = []
        self.wait_for = start

    def put(self, rank, val):
        """Insert (rank, val), keeping ``ranks`` sorted."""
        pos = bisect.bisect(self.ranks, rank)
        self.ranks.insert(pos, rank)
        self.data.insert(pos, val)

    def has_next(self):
        """:return: True when the next expected rank is available."""
        if not self.ranks:
            return False
        return self.ranks[0] == self.wait_for

    def get(self):
        """Pop and return ``(rank, value)`` for the next expected rank."""
        assert self.has_next()
        rank = self.ranks.pop(0)
        val = self.data.pop(0)
        self.wait_for += 1
        return (rank, val)
class OrderedResultGatherProc(multiprocessing.Process):
    """
    Gather indexed data from a data queue, and produce results with the
    original index-based order.
    """

    def __init__(self, data_queue, nr_producer, start=0):
        """
        :param data_queue: a multiprocessing.Queue producing (task_id, data)
        :param nr_producer: number of producer processes. Will terminate after
            receiving this many of DIE sentinel.
        :param start: the first task index
        """
        super(OrderedResultGatherProc, self).__init__()
        self.data_queue = data_queue
        self.ordered_container = OrderedContainer(start=start)
        self.result_queue = multiprocessing.Queue()
        self.nr_producer = nr_producer

    def run(self):
        nr_end = 0
        try:
            while True:
                task_id, data = self.data_queue.get()
                if task_id == DIE:
                    # forward the sentinel and count finished producers
                    self.result_queue.put((task_id, data))
                    nr_end += 1
                    if nr_end == self.nr_producer:
                        return
                    continue
                self.ordered_container.put(task_id, data)
                # flush every result that is now in order
                while self.ordered_container.has_next():
                    self.result_queue.put(self.ordered_container.get())
        except Exception as e:
            import traceback
            traceback.print_exc()
            raise e

    def get(self):
        """Blocking read of the next in-order result."""
        return self.result_queue.get()
def enable_call_trace():
    """Install a trace function that prints every Python function call with
    its caller's location. Debugging aid; very noisy."""
    def tracer(frame, event, arg):
        if event != 'call':
            return
        code = frame.f_code
        func_name = code.co_name
        # skip the printing machinery itself to avoid recursive noise
        if func_name == 'write' or func_name == 'print':
            return
        caller = frame.f_back
        if caller:
            print('Call to `%s` on line %s:%s from %s:%s' % (
                func_name, code.co_filename, frame.f_lineno,
                caller.f_code.co_filename, caller.f_lineno))
        return

    sys.settrace(tracer)
@memoized
def log_once(s):
    """Log a warning message only once per unique string.

    The ``memoized`` decorator makes repeated calls with the same ``s``
    no-ops.

    :param s: the message to log
    """
    logger.warn(s)
@six.add_metaclass(ABCMeta)
class Discretizer(object):
    """Abstract interface for mapping continuous values to discrete bins."""

    @abstractmethod
    def get_nr_bin(self):
        """:return: the total number of bins"""
        pass

    @abstractmethod
    def get_bin(self, v):
        """:return: the bin index for value ``v``"""
        pass
class Discretizer1D(Discretizer):
    """Base class for one-dimensional discretizers."""
    pass
class UniformDiscretizer1D(Discretizer1D):
    """Discretize a 1-D value range into equally-spaced bins."""

    def __init__(self, minv, maxv, spacing):
        """
        :param minv: minimum value of the first bin
        :param maxv: maximum value of the last bin
        :param spacing: width of a bin
        """
        self.minv = float(minv)
        self.maxv = float(maxv)
        self.spacing = float(spacing)
        self.nr_bin = int(np.ceil((self.maxv - self.minv) / self.spacing))

    def get_nr_bin(self):
        return self.nr_bin

    def get_bin(self, v):
        """Map value ``v`` to a bin index, clamping out-of-range values to
        the boundary bins (with a one-time warning)."""
        if v < self.minv:
            log_once('UniformDiscretizer1D: value smaller than min!')
            return 0
        if v > self.maxv:
            log_once('UniformDiscretizer1D: value larger than max!')
            return self.nr_bin - 1
        return int(np.clip((v - self.minv) / self.spacing,
                           0, self.nr_bin - 1))

    def get_bin_center(self, bin_id):
        """:return: the center value of bin ``bin_id``"""
        return self.minv + self.spacing * (bin_id + 0.5)

    def get_distribution(self, v, smooth_factor=0.05, smooth_radius=2):
        """Return a smoothed one-hot distribution of the sample v."""
        b = self.get_bin(v)
        ret = np.zeros((self.nr_bin,), dtype='float32')
        ret[b] = 1.0
        # out-of-range samples get a hard one-hot, no smoothing
        if v >= self.maxv or v <= self.minv:
            return ret
        # spread geometric mass to the right; IndexError ends at the edge
        try:
            for k in range(1, smooth_radius + 1):
                ret[b + k] = smooth_factor ** k
        except IndexError:
            pass
        # spread to the left, explicitly bounded at bin 0
        for k in range(1, min(smooth_radius + 1, b + 1)):
            ret[b - k] = smooth_factor ** k
        ret /= ret.sum()
        return ret
class UniformDiscretizerND(Discretizer):
    """Discretize an N-D value by combining N 1-D uniform discretizers."""

    def __init__(self, *min_max_spacing):
        """
        :param min_max_spacing: (minv, maxv, spacing) for each dimension
        """
        self.n = len(min_max_spacing)
        self.discretizers = [UniformDiscretizer1D(*k) for k in min_max_spacing]
        self.nr_bins = [k.get_nr_bin() for k in self.discretizers]

    def get_nr_bin(self):
        """:return: the total number of flat bins (product over dimensions)"""
        return np.prod(self.nr_bins)

    def get_bin(self, v):
        """:param v: a length-n sequence of values
        :return: the flat bin id"""
        assert len(v) == self.n
        bin_id = [self.discretizers[k].get_bin(v[k]) for k in range(self.n)]
        return self.get_bin_from_nd_bin_ids(bin_id)

    def get_nd_bin_ids(self, bin_id):
        """Decompose a flat bin id into per-dimension bin ids."""
        ret = []
        for k in reversed(list(range(self.n))):
            nr = self.nr_bins[k]
            v = bin_id % nr
            # BUGFIX: use floor division. Under Python 3, ``bin_id / nr`` is
            # true division and produced float (wrong) ids for every
            # dimension but the last.
            bin_id = bin_id // nr
            ret.append(v)
        return list(reversed(ret))

    def get_bin_from_nd_bin_ids(self, bin_ids):
        """Flatten per-dimension bin ids into a single bin id
        (row-major, last dimension fastest)."""
        acc, res = 1, 0
        for k in reversed(list(range(self.n))):
            res += bin_ids[k] * acc
            acc *= self.nr_bins[k]
        return res

    def get_nr_bin_nd(self):
        """:return: the list of per-dimension bin counts"""
        return self.nr_bins

    def get_bin_center(self, bin_id):
        """:return: a list of per-dimension center values for a flat bin id"""
        bin_id_nd = self.get_nd_bin_ids(bin_id)
        return [self.discretizers[k].get_bin_center(bin_id_nd[k])
                for k in range(self.n)]
def mkdir_p(dirname):
    """Make a directory recursively; do nothing if it already exists."""
    assert dirname is not None
    if dirname == '' or os.path.isdir(dirname):
        return
    try:
        os.makedirs(dirname)
    except OSError as e:
        # tolerate a concurrent creation between the isdir check and makedirs
        if e.errno != errno.EEXIST:
            raise e
def download(url, dir):
    """Download the file at ``url`` into directory ``dir``.

    :param url: URL to fetch; its basename becomes the local file name
    :param dir: target directory (created if missing)
    :return: the path of the downloaded file
    :raises: re-raises any download error after logging it
    """
    mkdir_p(dir)
    fname = url.split('/')[-1]
    fpath = os.path.join(dir, fname)

    def _progress(count, block_size, total_size):
        # simple carriage-return progress meter on stdout
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (
            fname, min(float(count * block_size) / total_size, 1.0) * 100.0))
        sys.stdout.flush()

    try:
        fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=_progress)
        statinfo = os.stat(fpath)
        size = statinfo.st_size
    except Exception:
        # narrowed from a bare ``except:``; still re-raises after logging
        logger.error('Failed to download {}'.format(url))
        raise
    assert size > 0, 'Download an empty file!'
    sys.stdout.write('\n')
    # fixed typo: was 'Succesfully'
    print('Successfully downloaded ' + fname + ' ' + str(size) + ' bytes.')
    return fpath
def recursive_walk(rootdir):
    """Yield the path of every file under ``rootdir``, recursively."""
    for base, _, filenames in os.walk(rootdir):
        for name in filenames:
            yield os.path.join(base, name)
def use_global_argument(args):
    """
    Add the content of an argparse.Namespace to the module-level ``globalns``
    object, one attribute per parsed argument.

    :param args: an argparse.Namespace (e.g. from ``parser.parse_args()``)
    """
    assert isinstance(args, argparse.Namespace), type(args)
    for (k, v) in six.iteritems(vars(args)):
        setattr(globalns, k, v)
def change_gpu(val):
    """Return a context that sets CUDA_VISIBLE_DEVICES to ``val``.

    A value of -1 maps to the empty string, i.e. hide all GPUs.
    """
    devices = str(val)
    if devices == '-1':
        devices = ''
    return change_env('CUDA_VISIBLE_DEVICES', devices)
def get_nr_gpu():
    """Return the number of GPUs listed in CUDA_VISIBLE_DEVICES.

    :raises AssertionError: if the variable is not set
    """
    env = os.environ.get('CUDA_VISIBLE_DEVICES')
    assert env is not None, 'gpu not set!'
    return len(env.split(','))
def get_gpus():
    """Return a list of GPU physical ids from CUDA_VISIBLE_DEVICES.

    :return: a list of ints
    :raises AssertionError: if the variable is not set
    """
    env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
    assert env is not None, 'gpu not set!'
    # wrap in list(): under Python 3 a bare map() is a lazy iterator, but the
    # documented contract (and the Python 2 behavior) is a list
    return list(map(int, env.strip().split(',')))
class CaffeLayerProcessor(object):
    """Convert the parameters of a loaded caffe ``Net`` into a dict of
    numpy arrays keyed by tensorpack-style names (e.g. ``conv1/W``)."""

    def __init__(self, net):
        # net: a caffe.Net instance with weights already loaded
        self.net = net
        self.layer_names = net._layer_names
        self.param_dict = {}
        # dispatch table: caffe layer type -> conversion method
        self.processors = {'Convolution': self.proc_conv, 'InnerProduct': self.proc_fc, 'BatchNorm': self.proc_bn, 'Scale': self.proc_scale}

    def process(self):
        """Walk all layers, convert supported ones.

        :return: dict mapping parameter name to numpy array
        """
        for (idx, layer) in enumerate(self.net.layers):
            param = layer.blobs
            name = self.layer_names[idx]
            if (layer.type in self.processors):
                logger.info('Processing layer {} of type {}'.format(name, layer.type))
                dic = self.processors[layer.type](idx, name, param)
                self.param_dict.update(dic)
            elif (len(layer.blobs) != 0):
                # layer carries weights we do not know how to convert
                logger.warn('{} layer contains parameters but is not supported!'.format(layer.type))
        return self.param_dict

    def proc_conv(self, idx, name, param):
        # param: [weight] or [weight, bias]
        assert (len(param) <= 2)
        assert (param[0].data.ndim == 4)
        # transpose the 4-D caffe weight into the layout expected by the
        # consumer (axes reordered to (2, 3, 1, 0))
        W = param[0].data.transpose(2, 3, 1, 0)
        if (len(param) == 1):
            # convolution without a bias term
            return {(name + '/W'): W}
        else:
            return {(name + '/W'): W, (name + '/b'): param[1].data}

    def proc_fc(self, idx, name, param):
        assert (len(param) == 2)
        # check whether the FC layer's input is spatial (4-D blob)
        prev_layer_name = self.net.bottom_names[name][0]
        prev_layer_output = self.net.blobs[prev_layer_name].data
        if (prev_layer_output.ndim == 4):
            # FC on spatial data: reshape the flat weight back to the input
            # spatial shape, then transpose like a convolution weight
            logger.info('FC layer {} takes spatial data.'.format(name))
            W = param[0].data
            W = W.reshape((((- 1),) + prev_layer_output.shape[1:])).transpose(2, 3, 1, 0)
        else:
            W = param[0].data.transpose()
        return {(name + '/W'): W, (name + '/b'): param[1].data}

    def proc_bn(self, idx, name, param):
        # param[2] holds caffe's moving-average scale factor; this code
        # only supports the un-scaled case (exactly 1.0)
        assert (param[2].data[0] == 1.0)
        return {(name + '/mean/EMA'): param[0].data, (name + '/variance/EMA'): param[1].data}

    def proc_scale(self, idx, name, param):
        bottom_name = self.net.bottom_names[name][0]
        # find the BatchNorm layer consuming the same bottom blob, and fold
        # this Scale layer's gamma/beta into that BatchNorm's name
        for (i, layer) in enumerate(self.net.layers):
            if (layer.type == 'BatchNorm'):
                name2 = self.layer_names[i]
                bottom_name2 = self.net.bottom_names[name2][0]
                if (bottom_name2 == bottom_name):
                    logger.info('Merge {} and {} into one BatchNorm layer'.format(name, name2))
                    return {(name2 + '/beta'): param[1].data, (name2 + '/gamma'): param[0].data}
        logger.error('Could not find a BN layer corresponding to this Scale layer!')
        raise ValueError()
def load_caffe(model_desc, model_file):
    """
    Load a caffe model and convert its parameters.

    :param model_desc: path to the prototxt network description
    :param model_file: path to the .caffemodel weight file
    :return: a dict of params (name -> numpy array)
    """
    # raise GLOG_minloglevel to silence caffe's verbose output during loading
    with change_env('GLOG_minloglevel', '2'):
        import caffe
        caffe.set_mode_cpu()
        net = caffe.Net(model_desc, model_file, caffe.TEST)
        param_dict = CaffeLayerProcessor(net).process()
        logger.info(('Model loaded from caffe. Params: ' + ' '.join(sorted(param_dict.keys()))))
        return param_dict
def get_caffe_pb():
    """Compile (if needed) and import caffe's protobuf python module.

    Expects ``caffe.proto`` under the dataset 'caffe' directory; generates
    ``caffe_pb2.py`` with protoc on first use.

    :return: the imported module
    """
    dir = get_dataset_path('caffe')
    caffe_pb_file = os.path.join(dir, 'caffe_pb2.py')
    if (not os.path.isfile(caffe_pb_file)):
        assert os.path.isfile(os.path.join(dir, 'caffe.proto'))
        # generate caffe_pb2.py next to caffe.proto
        ret = os.system('cd {} && protoc caffe.proto --python_out .'.format(dir))
        assert (ret == 0), 'Command `protoc caffe.proto --python_out .` failed!'
    import imp
    return imp.load_source('caffepb', caffe_pb_file)
class _MyFormatter(logging.Formatter):
    """Log formatter that colorizes the date prefix and tags WRN/ERR levels."""

    def format(self, record):
        date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
        msg = '%(message)s'
        if record.levelno == logging.WARNING:
            fmt = date + ' ' + colored('WRN', 'red', attrs=['blink']) + ' ' + msg
        elif record.levelno in (logging.ERROR, logging.CRITICAL):
            fmt = date + ' ' + colored('ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg
        else:
            fmt = date + ' ' + msg
        # py3 logging formats through a style object; keep py2's _fmt in sync
        if hasattr(self, '_style'):
            self._style._fmt = fmt
        self._fmt = fmt
        return super(_MyFormatter, self).format(record)
def _getlogger():
    """Create the package logger with a colored stdout handler attached."""
    ret = logging.getLogger('tensorpack')
    # don't bubble records up to the root logger
    ret.propagate = False
    ret.setLevel(logging.INFO)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
    ret.addHandler(handler)
    return ret
def get_time_str():
    """Return the current local time formatted as ``MMDD-HHMMSS``."""
    now = datetime.now()
    return now.strftime('%m%d-%H%M%S')
def _set_file(path):
    """Attach a FileHandler writing to ``path``; back up an existing file first.

    :param path: the log file path
    """
    if os.path.isfile(path):
        # keep the old log by renaming it with a timestamp suffix
        backup_name = ((path + '.') + get_time_str())
        shutil.move(path, backup_name)
        info("Log file '{}' backuped to '{}'".format(path, backup_name))
    hdl = logging.FileHandler(filename=path, encoding='utf-8', mode='w')
    hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
    _logger.addHandler(hdl)
    # record the command line that produced this log
    _logger.info(('Argv: ' + ' '.join(sys.argv)))
def set_logger_dir(dirname, action=None):
    """
    Set the directory for global logging.

    :param dirname: log directory
    :param action: an action (k/b/d/n) to perform when the directory already
        exists: keep / backup / delete / new-name. Will ask the user by default.
    """
    global LOG_FILE, LOG_DIR
    if os.path.isdir(dirname):
        if (not action):
            _logger.warn('Directory {} exists! Please either backup/delete it, or use a new directory.'.format(dirname))
            _logger.warn("If you're resuming from a previous run you can choose to keep it.")
            _logger.info('Select Action: k (keep) / b (backup) / d (delete) / n (new):')
        # prompt until the user types something non-empty
        while (not action):
            action = input().lower().strip()
        act = action
        if (act == 'b'):
            # move the old directory aside with a timestamp suffix
            backup_name = (dirname + get_time_str())
            shutil.move(dirname, backup_name)
            info("Directory '{}' backuped to '{}'".format(dirname, backup_name))
        elif (act == 'd'):
            shutil.rmtree(dirname)
        elif (act == 'n'):
            # log into a fresh, timestamp-suffixed directory instead
            dirname = (dirname + get_time_str())
            info('Use a new log directory {}'.format(dirname))
        elif (act == 'k'):
            pass
        else:
            raise ValueError('Unknown action: {}'.format(act))
    LOG_DIR = dirname
    from .fs import mkdir_p
    mkdir_p(dirname)
    LOG_FILE = os.path.join(dirname, 'log.log')
    _set_file(LOG_FILE)
def disable_logger():
    """Disable all logging ability from this moment on, by rebinding every
    module-level logging function (listed in ``_LOGGING_METHOD``) to a no-op."""
    for func in _LOGGING_METHOD:
        globals()[func] = (lambda x: None)
def auto_set_dir(action=None, overwrite=False):
    """Set the log directory to a subdir inside 'train_log', with the name
    being the main python file currently running.

    :param action: forwarded to :func:`set_logger_dir`
    :param overwrite: when False, do nothing if a log dir is already set
    """
    if ((LOG_DIR is not None) and (not overwrite)):
        return
    mod = sys.modules['__main__']
    basename = os.path.basename(mod.__file__)
    # strip the file extension from the script name
    set_logger_dir(os.path.join('train_log', basename[:basename.rfind('.')]), action=action)
def warn_dependency(name, dependencies):
    """Log a warning that feature ``name`` is unavailable because importing
    ``dependencies`` failed.

    :param name: the feature that becomes unavailable
    :param dependencies: the module(s) whose import failed
    """
    # fixed a stray trailing quote in the message (was "available'")
    warn("Failed to import '{}', {} won't be available".format(dependencies, name))
class LookUpTable(object):
    """Bidirectional mapping between a list of objects and their indices."""

    def __init__(self, objlist):
        """:param objlist: a sequence of distinct, hashable objects"""
        self.idx2obj = {}
        self.obj2idx = {}
        for idx, obj in enumerate(objlist):
            self.idx2obj[idx] = obj
            self.obj2idx[obj] = idx

    def size(self):
        """:return: the number of entries"""
        return len(self.idx2obj)

    def get_obj(self, idx):
        """:return: the object stored at ``idx``"""
        return self.idx2obj[idx]

    def get_idx(self, obj):
        """:return: the index of ``obj``"""
        return self.obj2idx[obj]

    def __str__(self):
        return str(self.idx2obj)
def dumps(obj):
    """Serialize ``obj`` to bytes with msgpack.

    ``use_bin_type=True`` keeps str and bytes distinguishable in the stream.
    """
    return msgpack.dumps(obj, use_bin_type=True)
def loads(buf):
    """Deserialize msgpack bytes produced by :func:`dumps`.

    NOTE(review): no ``raw=False``/``encoding`` option is passed, so
    depending on the msgpack version, strings may come back as bytes —
    confirm against the msgpack version this project pins.
    """
    return msgpack.loads(buf)
class StatCounter(object):
    """A simple counter that accumulates values and reports statistics."""

    def __init__(self):
        self.reset()

    def feed(self, v):
        """Add one value."""
        self._values.append(v)

    def reset(self):
        """Drop all accumulated values."""
        self._values = []

    @property
    def count(self):
        """Number of values fed so far."""
        return len(self._values)

    @property
    def average(self):
        # statistics require at least one value
        assert self._values
        return np.mean(self._values)

    @property
    def sum(self):
        assert self._values
        return np.sum(self._values)

    @property
    def max(self):
        assert self._values
        return max(self._values)
class RatioCounter(object):
    """A counter to count the ratio of something."""

    def __init__(self):
        self.reset()

    def reset(self):
        self._tot = 0
        self._cnt = 0

    def feed(self, cnt, tot=1):
        """Add ``cnt`` hits out of ``tot`` trials."""
        self._tot += tot
        self._cnt += cnt

    @property
    def ratio(self):
        # 0 before anything was fed, to avoid division by zero
        if self._tot == 0:
            return 0
        return self._cnt * 1.0 / self._tot

    @property
    def count(self):
        """Total number of trials fed."""
        return self._tot
class Accuracy(RatioCounter):
    """A RatioCounter with a fancy name."""

    @property
    def accuracy(self):
        return self.ratio
class BinaryStatistics(object):
    """
    Statistics for binary decision,
    including precision, recall, false positive, false negative.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        self.nr_pos = 0        # ground-truth positives
        self.nr_neg = 0        # ground-truth negatives
        self.nr_pred_pos = 0   # predicted positives
        self.nr_pred_neg = 0   # predicted negatives
        self.corr_pos = 0      # true positives
        self.corr_neg = 0      # true negatives

    def feed(self, pred, label):
        """
        :param pred: 0/1 np array
        :param label: 0/1 np array of the same size
        """
        assert pred.shape == label.shape
        self.nr_pos += (label == 1).sum()
        self.nr_neg += (label == 0).sum()
        self.nr_pred_pos += (pred == 1).sum()
        self.nr_pred_neg += (pred == 0).sum()
        self.corr_pos += ((pred == 1) & (pred == label)).sum()
        self.corr_neg += ((pred == 0) & (pred == label)).sum()

    @property
    def precision(self):
        if self.nr_pred_pos == 0:
            return 0
        return self.corr_pos * 1.0 / self.nr_pred_pos

    @property
    def recall(self):
        if self.nr_pos == 0:
            return 0
        return self.corr_pos * 1.0 / self.nr_pos

    @property
    def false_positive(self):
        if self.nr_pred_pos == 0:
            return 0
        return 1 - self.precision

    @property
    def false_negative(self):
        if self.nr_pos == 0:
            return 0
        return 1 - self.recall
class OnlineMoments(object):
    """Compute 1st and 2nd moments online (Welford's update).

    See algorithm at:
    https://www.wikiwand.com/en/Algorithms_for_calculating_variance#/Online_algorithm
    """

    def __init__(self):
        self._mean = 0
        self._M2 = 0
        self._n = 0

    def feed(self, x):
        """Accumulate one sample."""
        self._n += 1
        delta = x - self._mean
        self._mean += delta * (1.0 / self._n)
        # the second delta uses the *updated* mean, per Welford's update
        delta2 = x - self._mean
        self._M2 += delta * delta2

    @property
    def mean(self):
        return self._mean

    @property
    def variance(self):
        # sample (Bessel-corrected) variance; requires at least 2 samples
        return self._M2 / (self._n - 1)

    @property
    def std(self):
        return np.sqrt(self.variance)
class IterSpeedCounter(object):
    """Count how often some code gets reached, logging every
    ``print_every`` calls."""

    def __init__(self, print_every, name=None):
        """
        :param print_every: emit one log line every this many calls
        :param name: label used in the log message
        """
        self.cnt = 0
        self.print_every = int(print_every)
        self.name = name if name else 'IterSpeed'

    def reset(self):
        self.start = time.time()

    def __call__(self):
        # timing starts lazily at the first call
        if self.cnt == 0:
            self.reset()
        self.cnt += 1
        if self.cnt % self.print_every != 0:
            return
        elapsed = time.time() - self.start
        logger.info('{}: {:.2f} sec, {} times, {:.3g} sec/time'.format(
            self.name, elapsed, self.cnt, elapsed / self.cnt))
@contextmanager
def timed_operation(msg, log_start=False):
    """Context manager that logs how long its body took.

    :param msg: description of the operation
    :param log_start: also log when the operation starts
    """
    if log_start:
        logger.info('Start {} ...'.format(msg))
    start = time.time()
    yield
    elapsed = time.time() - start
    logger.info('{} finished, time:{:.2f}sec.'.format(msg, elapsed))
@contextmanager
def total_timer(msg):
    """Accumulate the elapsed time of this block into the global
    ``_TOTAL_TIMER_DATA`` counter keyed by ``msg``; report later with
    :func:`print_total_timer`."""
    start = time.time()
    (yield)
    t = (time.time() - start)
    _TOTAL_TIMER_DATA[msg].feed(t)
def print_total_timer():
    """Log one summary line for every key accumulated by :func:`total_timer`."""
    if (len(_TOTAL_TIMER_DATA) == 0):
        return
    for (k, v) in six.iteritems(_TOTAL_TIMER_DATA):
        logger.info('Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time'.format(k, v.sum, v.count, v.average))
@contextmanager
def change_env(name, val):
    """Context manager that temporarily sets environment variable ``name``.

    The previous value (or absence) is restored on exit, even when the body
    raises — the original version leaked the change on exception.

    :param name: environment variable name
    :param val: temporary value (a string)
    """
    oldval = os.environ.get(name, None)
    os.environ[name] = val
    try:
        yield
    finally:
        if oldval is None:
            del os.environ[name]
        else:
            os.environ[name] = oldval
def get_rng(obj=None):
    """Return a ``np.random.RandomState`` seeded from the object id,
    the process id and the current time.

    :param obj: any object, used only to perturb the seed
    """
    seed = (id(obj) + os.getpid() +
            int(datetime.now().strftime('%Y%m%d%H%M%S%f'))) % 4294967295
    return np.random.RandomState(seed)
def execute_only_once():
    """
    When used as::

        if execute_only_once():
            # do something

    the body is guaranteed to execute only the first time that particular
    call site is reached (keyed by caller filename and line number).
    """
    caller = inspect.currentframe().f_back
    ident = (caller.f_code.co_filename, caller.f_lineno)
    if ident in _EXECUTE_HISTORY:
        return False
    _EXECUTE_HISTORY.add(ident)
    return True
def get_dataset_path(*args):
    """Join path components under the dataset directory.

    Uses ``$TENSORPACK_DATASET`` when set; otherwise defaults to a
    directory relative to this file (logged once).

    :param args: path components to join under the dataset dir
    :return: the joined path
    """
    d = os.environ.get('TENSORPACK_DATASET', None)
    if (d is None):
        d = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'dataflow', 'dataset'))
        if execute_only_once():
            from . import logger
            logger.info('TENSORPACK_DATASET not set, using {} for dataset.'.format(d))
    assert os.path.isdir(d), d
    return os.path.join(d, *args)
def get_tqdm_kwargs(**kwargs):
    """Build default keyword arguments for tqdm progress bars.

    Any keyword given overrides the defaults. The refresh interval is long
    (60s) when the output stream is not a terminal, to keep logs small.
    """
    default = dict(
        smoothing=0.5,
        dynamic_ncols=True,
        ascii=True,
        bar_format='{l_bar}{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_noinv_fmt}]',
    )
    stream = kwargs.get('file', sys.stderr)
    default['mininterval'] = 0.5 if stream.isatty() else 60
    default.update(kwargs)
    return default
def get_tqdm(**kwargs):
    """Construct a tqdm bar with the project's default settings applied."""
    return tqdm(**get_tqdm_kwargs(**kwargs))
def ukbiobank_data():
    """Build train/test path lists of adjacent-slice image pairs from the
    UK Biobank preprocessing outputs.

    Reads three text files under <code_root>/Preprocessing:
      - base_slices.txt: per line, an eid followed by base-slice indices
      - statistics_record.txt: per-eid numbers (ints, then 4 trailing floats)
      - doubtful_segmentation_cases2.txt: eids to exclude

    :return: (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1,
              test_img_list0, test_img_list1, test_gt_list0, test_gt_list1)
        where list0/list1 hold the paths of slice s-1 and slice s respectively.
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing', 'doubtful_segmentation_cases2.txt')
    base_slices_file = os.path.join(code_dir, 'Preprocessing', 'base_slices.txt')
    with open(base_slices_file) as b_file:
        base_slices = b_file.readlines()
    base_slices = [x.strip() for x in base_slices]
    base_slices = [[int(z) for z in y.split()] for y in base_slices]
    with open(statistics_file) as s_file:
        statistics = s_file.readlines()
    statistics = [x.strip() for x in statistics]
    # every field except the last four is an int; the last four are floats
    statistics = [([int(z) for z in y.split()[:(- 4)]] + [float(z) for z in y.split()[(- 4):]]) for y in statistics]
    with open(doubtful_case_file) as d_file:
        doubtful_cases = d_file.readlines()
    doubtful_cases = [x.strip() for x in doubtful_cases]
    doubtful_cases = [int(x) for x in doubtful_cases]
    # keep rows that are not doubtful, have k[1]==k[2]==1 (presumably quality
    # flags — TODO confirm) and have both instants (k[7], k[8]) present
    used_statistics = [k for k in statistics if ((k[0] not in doubtful_cases) and (k[1] == 1) and (k[2] == 1) and (k[7] >= 0) and (k[8] >= 0))]
    print('There will be {} used eids'.format(len(used_statistics)))
    # deterministic split keyed on eid modulo 5
    train_statistics = [x for x in used_statistics if ((x[0] % 5) != 2)]
    test_statistics = [x for x in used_statistics if ((x[0] % 5) == 2)]
    train_img_list0 = []
    train_img_list1 = []
    train_gt_list0 = []
    train_gt_list1 = []
    test_img_list0 = []
    test_img_list1 = []
    test_gt_list0 = []
    test_gt_list1 = []
    train_subject_count = 0
    for k in train_statistics:
        eid = k[0]
        slices = k[5]
        ed_es_instant0 = k[7]
        ed_es_instant1 = k[8]
        ed_es_instant0_min_slice = k[9]
        ed_es_instant0_max_slice = k[10]
        ed_es_instant1_min_slice = k[11]
        ed_es_instant1_max_slice = k[12]
        # the base_slices row matching this eid; entries after the eid are
        # the base slice for each used instant
        base_slice_list = [x for x in base_slices if (x[0] == eid)][0][1:]
        train_subject_count += 1
        crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
        used_instants = []
        if (ed_es_instant0 >= 0):
            used_instants += [ed_es_instant0]
        if (ed_es_instant1 >= 0):
            used_instants += [ed_es_instant1]
        for (idx, t) in enumerate(used_instants):
            base_slice_t = base_slice_list[idx]
            # start one slice above the base slice (never negative)
            first_slice_idx = max((base_slice_t + 1), 0)
            for s in range(first_slice_idx, slices):
                s_t_image_file0 = os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str((s - 1)).zfill(2), str(t).zfill(2)))
                s_t_image_file1 = os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)))
                if ((s - 1) != base_slice_t):
                    s_t_image_gt_file0 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str((s - 1)).zfill(2), str(t).zfill(2)))
                else:
                    # at the base slice, point to the sentinel "-1" gt file
                    s_t_image_gt_file0 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str((- 1)).zfill(2), str(t).zfill(2)))
                s_t_image_gt_file1 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)))
                train_img_list0.append(s_t_image_file0)
                train_img_list1.append(s_t_image_file1)
                train_gt_list0.append(s_t_image_gt_file0)
                train_gt_list1.append(s_t_image_gt_file1)
    test_subject_count = 0
    # same collection logic as above, for the test split
    for k in test_statistics:
        eid = k[0]
        slices = k[5]
        ed_es_instant0 = k[7]
        ed_es_instant1 = k[8]
        ed_es_instant0_min_slice = k[9]
        ed_es_instant0_max_slice = k[10]
        ed_es_instant1_min_slice = k[11]
        ed_es_instant1_max_slice = k[12]
        base_slice_list = [x for x in base_slices if (x[0] == eid)][0][1:]
        test_subject_count += 1
        crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
        used_instants = []
        if (ed_es_instant0 >= 0):
            used_instants += [ed_es_instant0]
        if (ed_es_instant1 >= 0):
            used_instants += [ed_es_instant1]
        for (idx, t) in enumerate(used_instants):
            base_slice_t = base_slice_list[idx]
            first_slice_idx = max((base_slice_t + 1), 0)
            for s in range(first_slice_idx, slices):
                s_t_image_file0 = os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str((s - 1)).zfill(2), str(t).zfill(2)))
                s_t_image_file1 = os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)))
                if ((s - 1) != base_slice_t):
                    s_t_image_gt_file0 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str((s - 1)).zfill(2), str(t).zfill(2)))
                else:
                    s_t_image_gt_file0 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str((- 1)).zfill(2), str(t).zfill(2)))
                s_t_image_gt_file1 = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(s).zfill(2), str(t).zfill(2)))
                test_img_list0.append(s_t_image_file0)
                test_img_list1.append(s_t_image_file1)
                test_gt_list0.append(s_t_image_gt_file0)
                test_gt_list1.append(s_t_image_gt_file1)
    print('train_subject_count = {}'.format(train_subject_count))
    print('test_subject_count = {}'.format(test_subject_count))
    print('train_image_count = {}'.format(len(train_img_list0)))
    print('test_image_count = {}'.format(len(test_img_list0)))
    return (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1, test_img_list0, test_img_list1, test_gt_list0, test_gt_list1)
def ukbiobank_data():
    """Build train/test path lists of first-slice images (plus slice-range and
    base-slice metadata) from the UK Biobank preprocessing outputs.

    NOTE(review): this shadows the other ``ukbiobank_data`` definition and
    returns a different tuple — confirm which one callers expect.

    Reads the same three Preprocessing text files as the pairwise variant.

    :return: (train_img_list, train_gt_list, train_first_slice_list,
              train_end_slice_list, train_base_list,
              test_img_list, test_gt_list, test_first_slice_list,
              test_end_slice_list, test_base_list)
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing', 'doubtful_segmentation_cases2.txt')
    base_slices_file = os.path.join(code_dir, 'Preprocessing', 'base_slices.txt')
    with open(base_slices_file) as b_file:
        base_slices = b_file.readlines()
    base_slices = [x.strip() for x in base_slices]
    base_slices = [[int(z) for z in y.split()] for y in base_slices]
    with open(statistics_file) as s_file:
        statistics = s_file.readlines()
    statistics = [x.strip() for x in statistics]
    # every field except the last four is an int; the last four are floats
    statistics = [([int(z) for z in y.split()[:(- 4)]] + [float(z) for z in y.split()[(- 4):]]) for y in statistics]
    with open(doubtful_case_file) as d_file:
        doubtful_cases = d_file.readlines()
    doubtful_cases = [x.strip() for x in doubtful_cases]
    doubtful_cases = [int(x) for x in doubtful_cases]
    # same eid filtering as the pairwise variant
    used_statistics = [k for k in statistics if ((k[0] not in doubtful_cases) and (k[1] == 1) and (k[2] == 1) and (k[7] >= 0) and (k[8] >= 0))]
    print('There will be {} used eids'.format(len(used_statistics)))
    # deterministic split keyed on eid modulo 5
    train_statistics = [x for x in used_statistics if ((x[0] % 5) != 2)]
    test_statistics = [x for x in used_statistics if ((x[0] % 5) == 2)]
    train_img_list = []
    train_gt_list = []
    train_first_slice_list = []
    train_end_slice_list = []
    train_base_list = []
    test_img_list = []
    test_gt_list = []
    test_first_slice_list = []
    test_end_slice_list = []
    test_base_list = []
    train_subject_count = 0
    for k in train_statistics:
        eid = k[0]
        slices = k[5]
        ed_es_instant0 = k[7]
        ed_es_instant1 = k[8]
        ed_es_instant0_min_slice = k[9]
        ed_es_instant0_max_slice = k[10]
        ed_es_instant1_min_slice = k[11]
        ed_es_instant1_max_slice = k[12]
        # the base_slices row matching this eid; entries after the eid are
        # the base slice for each used instant
        base_slice_list = [x for x in base_slices if (x[0] == eid)][0][1:]
        train_subject_count += 1
        crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
        used_instants = []
        if (ed_es_instant0 >= 0):
            used_instants += [ed_es_instant0]
        if (ed_es_instant1 >= 0):
            used_instants += [ed_es_instant1]
        for (idx, t) in enumerate(used_instants):
            base_slice_t = base_slice_list[idx]
            # only slice 00 is referenced here; the slice range is carried
            # separately through first/end/base lists
            s_t_image_file = os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(0).zfill(2), str(t).zfill(2)))
            s_t_image_gt_file = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(0).zfill(2), str(t).zfill(2)))
            train_img_list.append(s_t_image_file)
            train_gt_list.append(s_t_image_gt_file)
            train_first_slice_list.append(0)
            train_end_slice_list.append(slices)
            train_base_list.append(base_slice_t)
    test_subject_count = 0
    # same collection logic as above, for the test split
    for k in test_statistics:
        eid = k[0]
        slices = k[5]
        ed_es_instant0 = k[7]
        ed_es_instant1 = k[8]
        ed_es_instant0_min_slice = k[9]
        ed_es_instant0_max_slice = k[10]
        ed_es_instant1_min_slice = k[11]
        ed_es_instant1_max_slice = k[12]
        base_slice_list = [x for x in base_slices if (x[0] == eid)][0][1:]
        test_subject_count += 1
        crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
        used_instants = []
        if (ed_es_instant0 >= 0):
            used_instants += [ed_es_instant0]
        if (ed_es_instant1 >= 0):
            used_instants += [ed_es_instant1]
        for (idx, t) in enumerate(used_instants):
            base_slice_t = base_slice_list[idx]
            s_t_image_file = os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(0).zfill(2), str(t).zfill(2)))
            s_t_image_gt_file = os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(0).zfill(2), str(t).zfill(2)))
            test_img_list.append(s_t_image_file)
            test_gt_list.append(s_t_image_gt_file)
            test_first_slice_list.append(0)
            test_end_slice_list.append(slices)
            test_base_list.append(base_slice_t)
    print('train_subject_count = {}'.format(train_subject_count))
    print('test_subject_count = {}'.format(test_subject_count))
    print('train_image_count = {}'.format(len(train_img_list)))
    print('test_image_count = {}'.format(len(test_img_list)))
    return (train_img_list, train_gt_list, train_first_slice_list, train_end_slice_list, train_base_list, test_img_list, test_gt_list, test_first_slice_list, test_end_slice_list, test_base_list)
def net_module(input_shape, num_outputs):
    """Builds a two-stream U-net-like segmentation architecture.

    One encoder stream processes the current image (input_img1); a second
    "context" encoder processes the previous image concatenated with the
    one-hot encoding of its mask (input_img0 + input_mask0).  The two streams
    are fused at the bottleneck, decoded with skip connections from the image
    stream, and supervised at three resolutions via summed 1x1-conv heads.

    Args:
        input_shape: The input shape in the form (nb_rows, nb_cols, nb_channels)
        num_outputs: The number of outputs at final softmax layer
    Returns:
        The keras `Model` taking [input_img0, input_img1, input_mask0] and
        producing a per-pixel softmax of shape (nb_rows, nb_cols, num_outputs).
    """
    # Channels-last layout: features are concatenated along axis 3.
    CHANNEL_AXIS = 3
    handle_dim_ordering()
    if (len(input_shape) != 3):
        raise Exception('Input shape should be a tuple (nb_rows, nb_cols, nb_channels)')
    if (K.image_dim_ordering() != 'tf'):
        # Theano-style ordering: move channels to the front.
        input_shape = (input_shape[2], input_shape[0], input_shape[1])
    input_img0 = Input(shape=input_shape, name='input_img0')
    input_img1 = Input(shape=input_shape, name='input_img1')
    input_mask0 = Input(shape=input_shape, name='input_mask0')
    # One-hot encode the previous mask so each class becomes its own channel.
    input_mask0_one_hot = Lambda(one_hot, arguments={'num_classes': num_outputs})(input_mask0)
    # Context input: previous image stacked with its one-hot mask.
    concatenate = Concatenate(axis=CHANNEL_AXIS, name='concatenate')([input_img0, input_mask0_one_hot])
    base_channel = 32
    # --- Encoder on the current image (input_img1): 5 pool stages, channels x2 each ---
    block_conv_1 = conv_bn_leakyrelu_repetition_block(filters=(1 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block1')(input_img1)
    block_pool_2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block2')(block_conv_1)
    block_conv_2 = conv_bn_leakyrelu_repetition_block(filters=(2 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block2')(block_pool_2)
    block_pool_4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block4')(block_conv_2)
    block_conv_4 = conv_bn_leakyrelu_repetition_block(filters=(4 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block4')(block_pool_4)
    block_pool_8 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block8')(block_conv_4)
    block_conv_8 = conv_bn_leakyrelu_repetition_block(filters=(8 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block8')(block_pool_8)
    block_pool_16 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block16')(block_conv_8)
    block_conv_16 = conv_bn_leakyrelu_repetition_block(filters=(16 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block16')(block_pool_16)
    block_pool_32 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block32')(block_conv_16)
    # Bottleneck of the image stream (single repetition).
    block_conv_32 = conv_bn_leakyrelu_repetition_block(filters=(32 * base_channel), kernel_size=(3, 3), repetitions=1, first_layer_down_size=False, alpha=0.1, name='conv_block32')(block_pool_32)
    # --- Context encoder on (image0 + one-hot mask0): mirrors the image encoder ---
    block_context_conv_1 = conv_bn_leakyrelu_repetition_block(filters=(1 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block1')(concatenate)
    block_context_pool_2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block2')(block_context_conv_1)
    block_context_conv_2 = conv_bn_leakyrelu_repetition_block(filters=(2 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block2')(block_context_pool_2)
    block_context_pool_4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block4')(block_context_conv_2)
    block_context_conv_4 = conv_bn_leakyrelu_repetition_block(filters=(4 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block4')(block_context_pool_4)
    block_context_pool_8 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block8')(block_context_conv_4)
    block_context_conv_8 = conv_bn_leakyrelu_repetition_block(filters=(8 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block8')(block_context_pool_8)
    block_context_pool_16 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block16')(block_context_conv_8)
    block_context_conv_16 = conv_bn_leakyrelu_repetition_block(filters=(16 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block16')(block_context_pool_16)
    block_context_pool_32 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block32')(block_context_conv_16)
    block_context_conv_32 = conv_bn_leakyrelu_repetition_block(filters=(32 * base_channel), kernel_size=(3, 3), repetitions=1, first_layer_down_size=False, alpha=0.1, name='context_conv_block32')(block_context_pool_32)
    # --- Fusion of the two streams at the bottleneck ---
    block_concat_32 = Concatenate(axis=CHANNEL_AXIS, name='concat32')([block_conv_32, block_context_conv_32])
    block_expan_conv_32 = conv_bn_leakyrelu_repetition_block(filters=(32 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block32')(block_concat_32)
    # --- Decoder: upsample + skip connections from the image-stream encoder only ---
    block_up_16 = UpSampling2D(size=(2, 2), name='up_block16')(block_expan_conv_32)
    block_concat_16 = Concatenate(axis=CHANNEL_AXIS, name='concat16')([block_up_16, block_conv_16])
    block_expan_conv_16 = conv_bn_leakyrelu_repetition_block(filters=(16 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block16')(block_concat_16)
    block_up_8 = UpSampling2D(size=(2, 2), name='up_block8')(block_expan_conv_16)
    block_concat_8 = Concatenate(axis=CHANNEL_AXIS, name='concat8')([block_up_8, block_conv_8])
    block_expan_conv_8 = conv_bn_leakyrelu_repetition_block(filters=(8 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block8')(block_concat_8)
    block_up_4 = UpSampling2D(size=(2, 2), name='up_block4')(block_expan_conv_8)
    block_concat_4 = Concatenate(axis=CHANNEL_AXIS, name='concat4')([block_up_4, block_conv_4])
    block_expan_conv_4 = conv_bn_leakyrelu_repetition_block(filters=(4 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block4')(block_concat_4)
    block_up_2 = UpSampling2D(size=(2, 2), name='up_block2')(block_expan_conv_4)
    block_concat_2 = Concatenate(axis=CHANNEL_AXIS, name='concat2')([block_up_2, block_conv_2])
    block_expan_conv_2 = conv_bn_leakyrelu_repetition_block(filters=(2 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block2')(block_concat_2)
    block_up_1 = UpSampling2D(size=(2, 2), name='up_block1')(block_expan_conv_2)
    block_concat_1 = Concatenate(axis=CHANNEL_AXIS, name='concat1')([block_up_1, block_conv_1])
    block_expan_conv_1 = conv_bn_leakyrelu_repetition_block(filters=(1 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block1')(block_concat_1)
    # --- Deep supervision: 1x1-conv heads at 1/4, 1/2 and full resolution ---
    block_seg_4 = Conv2D(filters=num_outputs, kernel_size=(1, 1), strides=(1, 1), padding='same', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name='seg_block4')(block_expan_conv_4)
    block_seg_2 = Conv2D(filters=num_outputs, kernel_size=(1, 1), strides=(1, 1), padding='same', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name='seg_block2')(block_expan_conv_2)
    block_seg_1 = Conv2D(filters=num_outputs, kernel_size=(1, 1), strides=(1, 1), padding='same', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name='seg_block1')(block_expan_conv_1)
    # Sum the heads coarse-to-fine: upsample the 1/4 head, add the 1/2 head,
    # upsample again, add the full-resolution head.
    block_seg_up_2 = UpSampling2D(size=(2, 2), name='seg_up_block2')(block_seg_4)
    block_add_2 = Add(name='add_block2')([block_seg_up_2, block_seg_2])
    block_seg_up_1 = UpSampling2D(size=(2, 2), name='seg_up_block1')(block_add_2)
    prediction = Add(name='prediction')([block_seg_up_1, block_seg_1])
    # Flatten spatial dims so softmax normalizes over the class channels,
    # then restore the (rows, cols, classes) layout.
    reshape1 = Reshape(((input_shape[0] * input_shape[1]), (input_shape[2] * num_outputs)), name='reshape1')(prediction)
    prediction_softmax = Activation('softmax', name='softmax')(reshape1)
    reshape2 = Reshape((input_shape[0], input_shape[1], (input_shape[2] * num_outputs)), name='output')(prediction_softmax)
    model = Model(inputs=[input_img0, input_img1, input_mask0], outputs=reshape2)
    return model
def train_lvrv_net():
    """Train the LV/RV slice-propagation segmentation network (4 classes).

    Builds (or resumes) the model, creates identically-seeded paired
    image/mask generators for train and validation, runs batch-wise training
    with periodic validation, step-decays the learning rate every 10 epochs
    and checkpoints the weights every 5 epochs.
    """
    code_path = config.code_root
    initial_lr = config.lvrv_net_initial_lr
    decay_rate = config.lvrv_net_decay_rate
    batch_size = config.lvrv_net_batch_size
    input_img_size = config.lvrv_net_imput_img_size
    epochs = config.lvrv_net_epochs

    # Set to a saved epoch number (> 0) to resume from that checkpoint.
    current_epoch = 0
    new_start_epoch = current_epoch

    model = net_module(input_shape=(input_img_size, input_img_size, 1), num_outputs=4)
    if current_epoch == 0:
        print('Building model')
    else:
        print('Loading model')
        model.load_weights(filepath=os.path.join(
            code_path, 'LVRV_Segmentation',
            'model_lvrv_net_epoch{}.h5'.format(str(current_epoch).zfill(3))))

    model.compile(optimizer=Adam(lr=initial_lr), loss=dice_coef5_loss,
                  metrics=[dice_coef5, dice_coef5_0, dice_coef5_1, dice_coef5_2, dice_coef5_3])
    print('This model has {} parameters'.format(model.count_params()))

    (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1,
     test_img_list0, test_img_list1, test_gt_list0, test_gt_list1) = ukbiobank_data()
    training_sample = len(train_img_list0)

    # Shared augmentation settings; images are mean/variance normalized,
    # masks only get label remapping (elementwise_multiplication).
    img_data_gen_args = dict(featurewise_center=False, samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False, zca_epsilon=1e-06,
                             rotation_range=180.0,
                             width_shift_range=0.15, height_shift_range=0.15,
                             shear_range=0.0, zoom_range=0.15,
                             channel_shift_range=0.0,
                             fill_mode='constant', cval=0.0,
                             horizontal_flip=True, vertical_flip=True,
                             rescale=None,
                             preprocessing_function=mean_variance_normalization5,
                             data_format=K.image_data_format())
    mask_data_gen_args = copy.deepcopy(img_data_gen_args)
    mask_data_gen_args['preprocessing_function'] = elementwise_multiplication

    def _make_generator(path_lists, arg_dicts, seed, save_period):
        """Return a zipped generator of per-list flows, all using the same
        seed so image and mask augmentations stay in sync."""
        flows = []
        for path_list, gen_args in zip(path_lists, arg_dicts):
            datagen = ImageDataGenerator2(**gen_args)
            # fit() on a dummy array only seeds the generator; no feature-wise
            # statistics are enabled above.
            datagen.fit(np.zeros((1, 1, 1, 1)), augment=False, rounds=0, seed=seed)
            flows.append(datagen.flow_from_path_list(
                path_list=path_list,
                target_size=(input_img_size, input_img_size),
                pad_to_square=True, resize_mode='nearest',
                histogram_based_preprocessing=False, clahe=False,
                color_mode='grayscale', class_list=None, class_mode=None,
                batch_size=batch_size, shuffle=True, seed=seed,
                save_to_dir=None, save_prefix='', save_format='png',
                save_period=save_period, follow_links=False))
        return izip(*flows)

    print('Creating generators for training')
    train_generator = _make_generator(
        [train_img_list0, train_img_list1, train_gt_list0, train_gt_list1],
        [img_data_gen_args, img_data_gen_args, mask_data_gen_args, mask_data_gen_args],
        seed=1, save_period=500)

    print('Creating generators for validation')
    validation_generator = _make_generator(
        [test_img_list0, test_img_list1, test_gt_list0, test_gt_list1],
        [img_data_gen_args, img_data_gen_args, mask_data_gen_args, mask_data_gen_args],
        seed=2, save_period=1)

    print('Start training')
    steps = int(math.ceil(float(training_sample) / batch_size))
    print('There will be {} epochs with {} steps in each epoch'.format(epochs, steps))
    total_step = 0
    for epoch in range(new_start_epoch + 1, new_start_epoch + epochs + 1):
        print('\n\n##########\nEpoch {}\n##########'.format(epoch))
        for step in range(steps):
            print('\n****** Epoch {} Step {} ******'.format(epoch, step))
            batch_img0, batch_img1, batch_mask0, batch_mask1 = next(train_generator)
            print(model.train_on_batch([batch_img0, batch_img1, batch_mask0],
                                       batch_mask1, sample_weight=None, class_weight=None))
            # Evaluate one validation batch every 500 training steps.
            if (total_step % 500) == 0:
                (val_batch_img0, val_batch_img1,
                 val_batch_mask0, val_batch_mask1) = next(validation_generator)
                print('test:')
                print(model.test_on_batch([val_batch_img0, val_batch_img1, val_batch_mask0],
                                          val_batch_mask1, sample_weight=None))
            total_step += 1
        # Step-decay the learning rate every 10 epochs.
        if (epoch % 10) == 0:
            new_lr = initial_lr * (decay_rate ** (epoch // 10))
            K.set_value(model.optimizer.lr, new_lr)
            print('learning rate is reset to %.8f' % new_lr)
        # Checkpoint every 5 epochs.
        if (epoch % 5) == 0:
            model.save_weights(os.path.join(
                code_path, 'LVRV_Segmentation',
                'model_lvrv_net_epoch{}.h5'.format(str(epoch).zfill(3))))
    print('Training is done!')
def ukbiobank_data():
    """Build train/test lists of consecutive-slice image/ground-truth pairs.

    Same subject selection and eid-based split as the companion loader
    (train = eid % 5 != 2, test = eid % 5 == 2), but each entry is a pair of
    adjacent slices (s-1, s) of the same cardiac instant, used to propagate a
    segmentation from one slice to the next.  The very first pair of each
    instant uses the sentinel slice index -1 for the previous ground truth.

    Returns:
        An 8-tuple
        (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1,
         test_img_list0, test_img_list1, test_gt_list0, test_gt_list1).
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing', 'doubtful_segmentation_cases2.txt')
    base_slices_file = os.path.join(code_dir, 'Preprocessing', 'base_slices.txt')

    # base_slices.txt: one line per subject, "eid base_slice_instant0 base_slice_instant1 ...".
    with open(base_slices_file) as b_file:
        base_slices = [[int(z) for z in line.split()] for line in b_file if line.strip()]

    # statistics_record.txt: per-subject row of ints followed by four floats.
    with open(statistics_file) as s_file:
        statistics = []
        for line in s_file:
            tokens = line.split()
            if not tokens:
                continue
            statistics.append([int(z) for z in tokens[:-4]] + [float(z) for z in tokens[-4:]])

    # doubtful_segmentation_cases2.txt: one eid per line; a set gives O(1) lookup.
    with open(doubtful_case_file) as d_file:
        doubtful_cases = {int(line) for line in d_file if line.strip()}

    # Keep subjects that are not doubtful, pass both quality flags (k[1], k[2])
    # and have two valid ED/ES instants (k[7], k[8]).
    used_statistics = [k for k in statistics
                      if (k[0] not in doubtful_cases) and (k[1] == 1) and (k[2] == 1)
                      and (k[7] >= 0) and (k[8] >= 0)]
    print('There will be {} used eids'.format(len(used_statistics)))

    train_statistics = [x for x in used_statistics if (x[0] % 5) != 2]
    test_statistics = [x for x in used_statistics if (x[0] % 5) == 2]

    # eid -> per-instant base slices; first occurrence wins, matching the
    # original "[x for x in base_slices if x[0] == eid][0]" lookup.
    base_slice_map = {}
    for row in base_slices:
        base_slice_map.setdefault(row[0], row[1:])

    def _collect(subject_statistics):
        """Build (img0, img1, gt0, gt1, subject_count) lists for one split."""
        img_list0 = []
        img_list1 = []
        gt_list0 = []
        gt_list1 = []
        subject_count = 0
        for k in subject_statistics:
            eid = k[0]
            slices = k[5]
            subject_count += 1
            crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
            # Both instants are >= 0 after the filter above, but keep the guard
            # so the helper also behaves on unfiltered rows.
            used_instants = [t for t in (k[7], k[8]) if t >= 0]
            for idx, t in enumerate(used_instants):
                base_slice_t = base_slice_map[eid][idx]
                start_slice = max(base_slice_t, 0)
                t_str = str(t).zfill(2)
                # Pairs propagate from slice s-1 to slice s, from the (clamped)
                # base slice up to the last slice of the stack.
                for s in range(start_slice, slices):
                    img_list0.append(os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(s - 1).zfill(2), t_str)))
                    img_list1.append(os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(s).zfill(2), t_str)))
                    # The first pair of an instant has no previous ground
                    # truth; slice index -1 names the sentinel file.
                    prev_gt_slice = (s - 1) if s != start_slice else -1
                    gt_list0.append(os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(prev_gt_slice).zfill(2), t_str)))
                    gt_list1.append(os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(s).zfill(2), t_str)))
        return img_list0, img_list1, gt_list0, gt_list1, subject_count

    (train_img_list0, train_img_list1, train_gt_list0,
     train_gt_list1, train_subject_count) = _collect(train_statistics)
    (test_img_list0, test_img_list1, test_gt_list0,
     test_gt_list1, test_subject_count) = _collect(test_statistics)

    print('train_subject_count = {}'.format(train_subject_count))
    print('test_subject_count = {}'.format(test_subject_count))
    print('train_image_count = {}'.format(len(train_img_list0)))
    print('test_image_count = {}'.format(len(test_img_list0)))
    return (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1,
            test_img_list0, test_img_list1, test_gt_list0, test_gt_list1)
def ukbiobank_data():
    """Build parallel train/test path lists for the UK Biobank short-axis data.

    Reads three preprocessing files (base slices, per-subject statistics,
    doubtful-segmentation eids), keeps only subjects that pass the quality
    filters, and splits them by eid: train = eid % 5 != 2, test = eid % 5 == 2.
    One entry is produced per usable ED/ES instant of each subject, pointing at
    slice 00 of that instant.

    Returns:
        A 10-tuple
        (train_img_list, train_gt_list, train_first_slice_list,
         train_end_slice_list, train_base_list,
         test_img_list, test_gt_list, test_first_slice_list,
         test_end_slice_list, test_base_list).
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing', 'doubtful_segmentation_cases2.txt')
    base_slices_file = os.path.join(code_dir, 'Preprocessing', 'base_slices.txt')

    # base_slices.txt: one line per subject, "eid base_slice_instant0 base_slice_instant1 ...".
    with open(base_slices_file) as b_file:
        base_slices = [[int(z) for z in line.split()] for line in b_file if line.strip()]

    # statistics_record.txt: per-subject row of ints followed by four floats.
    with open(statistics_file) as s_file:
        statistics = []
        for line in s_file:
            tokens = line.split()
            if not tokens:
                continue
            statistics.append([int(z) for z in tokens[:-4]] + [float(z) for z in tokens[-4:]])

    # doubtful_segmentation_cases2.txt: one eid per line; a set gives O(1) lookup.
    with open(doubtful_case_file) as d_file:
        doubtful_cases = {int(line) for line in d_file if line.strip()}

    # Keep subjects that are not doubtful, pass both quality flags (k[1], k[2])
    # and have two valid ED/ES instants (k[7], k[8]).
    used_statistics = [k for k in statistics
                      if (k[0] not in doubtful_cases) and (k[1] == 1) and (k[2] == 1)
                      and (k[7] >= 0) and (k[8] >= 0)]
    print('There will be {} used eids'.format(len(used_statistics)))

    train_statistics = [x for x in used_statistics if (x[0] % 5) != 2]
    test_statistics = [x for x in used_statistics if (x[0] % 5) == 2]

    # eid -> per-instant base slices; first occurrence wins, matching the
    # original "[x for x in base_slices if x[0] == eid][0]" lookup.
    base_slice_map = {}
    for row in base_slices:
        base_slice_map.setdefault(row[0], row[1:])

    def _collect(subject_statistics):
        """Build the five parallel per-image lists for one split."""
        img_list = []
        gt_list = []
        first_slice_list = []
        end_slice_list = []
        base_list = []
        subject_count = 0
        for k in subject_statistics:
            eid = k[0]
            slices = k[5]
            subject_count += 1
            crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
            # Both instants are >= 0 after the filter above, but keep the guard
            # so the helper also behaves on unfiltered rows.
            used_instants = [t for t in (k[7], k[8]) if t >= 0]
            for idx, t in enumerate(used_instants):
                base_slice_t = base_slice_map[eid][idx]
                t_str = str(t).zfill(2)
                img_list.append(os.path.join(crop_2D_path, 'crop_2D_{}_{}.png'.format(str(0).zfill(2), t_str)))
                gt_list.append(os.path.join(crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(str(0).zfill(2), t_str)))
                first_slice_list.append(0)
                end_slice_list.append(slices)
                base_list.append(base_slice_t)
        return img_list, gt_list, first_slice_list, end_slice_list, base_list, subject_count

    (train_img_list, train_gt_list, train_first_slice_list,
     train_end_slice_list, train_base_list, train_subject_count) = _collect(train_statistics)
    (test_img_list, test_gt_list, test_first_slice_list,
     test_end_slice_list, test_base_list, test_subject_count) = _collect(test_statistics)

    print('train_subject_count = {}'.format(train_subject_count))
    print('test_subject_count = {}'.format(test_subject_count))
    print('train_image_count = {}'.format(len(train_img_list)))
    print('test_image_count = {}'.format(len(test_img_list)))
    return (train_img_list, train_gt_list, train_first_slice_list, train_end_slice_list,
            train_base_list, test_img_list, test_gt_list, test_first_slice_list,
            test_end_slice_list, test_base_list)
def net_module(input_shape, num_outputs):
    """Builds a two-stream U-net-like segmentation architecture.

    One encoder stream processes the current image (input_img1); a second
    "context" encoder processes the previous image concatenated with the
    one-hot encoding of its mask (input_img0 + input_mask0).  The two streams
    are fused at the bottleneck, decoded with skip connections from the image
    stream, and supervised at three resolutions via summed 1x1-conv heads.
    Identical to the companion builder except base_channel = 24.

    Args:
        input_shape: The input shape in the form (nb_rows, nb_cols, nb_channels)
        num_outputs: The number of outputs at final softmax layer
    Returns:
        The keras `Model` taking [input_img0, input_img1, input_mask0] and
        producing a per-pixel softmax of shape (nb_rows, nb_cols, num_outputs).
    """
    # Channels-last layout: features are concatenated along axis 3.
    CHANNEL_AXIS = 3
    handle_dim_ordering()
    if (len(input_shape) != 3):
        raise Exception('Input shape should be a tuple (nb_rows, nb_cols, nb_channels)')
    if (K.image_dim_ordering() != 'tf'):
        # Theano-style ordering: move channels to the front.
        input_shape = (input_shape[2], input_shape[0], input_shape[1])
    input_img0 = Input(shape=input_shape, name='input_img0')
    input_img1 = Input(shape=input_shape, name='input_img1')
    input_mask0 = Input(shape=input_shape, name='input_mask0')
    # One-hot encode the previous mask so each class becomes its own channel.
    input_mask0_one_hot = Lambda(one_hot, arguments={'num_classes': num_outputs})(input_mask0)
    # Context input: previous image stacked with its one-hot mask.
    concatenate = Concatenate(axis=CHANNEL_AXIS, name='concatenate')([input_img0, input_mask0_one_hot])
    base_channel = 24
    # --- Encoder on the current image (input_img1): 5 pool stages, channels x2 each ---
    block_conv_1 = conv_bn_leakyrelu_repetition_block(filters=(1 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block1')(input_img1)
    block_pool_2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block2')(block_conv_1)
    block_conv_2 = conv_bn_leakyrelu_repetition_block(filters=(2 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block2')(block_pool_2)
    block_pool_4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block4')(block_conv_2)
    block_conv_4 = conv_bn_leakyrelu_repetition_block(filters=(4 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block4')(block_pool_4)
    block_pool_8 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block8')(block_conv_4)
    block_conv_8 = conv_bn_leakyrelu_repetition_block(filters=(8 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block8')(block_pool_8)
    block_pool_16 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block16')(block_conv_8)
    block_conv_16 = conv_bn_leakyrelu_repetition_block(filters=(16 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='conv_block16')(block_pool_16)
    block_pool_32 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='pool_block32')(block_conv_16)
    # Bottleneck of the image stream (single repetition).
    block_conv_32 = conv_bn_leakyrelu_repetition_block(filters=(32 * base_channel), kernel_size=(3, 3), repetitions=1, first_layer_down_size=False, alpha=0.1, name='conv_block32')(block_pool_32)
    # --- Context encoder on (image0 + one-hot mask0): mirrors the image encoder ---
    block_context_conv_1 = conv_bn_leakyrelu_repetition_block(filters=(1 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block1')(concatenate)
    block_context_pool_2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block2')(block_context_conv_1)
    block_context_conv_2 = conv_bn_leakyrelu_repetition_block(filters=(2 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block2')(block_context_pool_2)
    block_context_pool_4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block4')(block_context_conv_2)
    block_context_conv_4 = conv_bn_leakyrelu_repetition_block(filters=(4 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block4')(block_context_pool_4)
    block_context_pool_8 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block8')(block_context_conv_4)
    block_context_conv_8 = conv_bn_leakyrelu_repetition_block(filters=(8 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block8')(block_context_pool_8)
    block_context_pool_16 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block16')(block_context_conv_8)
    block_context_conv_16 = conv_bn_leakyrelu_repetition_block(filters=(16 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='context_conv_block16')(block_context_pool_16)
    block_context_pool_32 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid', data_format=None, name='context_pool_block32')(block_context_conv_16)
    block_context_conv_32 = conv_bn_leakyrelu_repetition_block(filters=(32 * base_channel), kernel_size=(3, 3), repetitions=1, first_layer_down_size=False, alpha=0.1, name='context_conv_block32')(block_context_pool_32)
    # --- Fusion of the two streams at the bottleneck ---
    block_concat_32 = Concatenate(axis=CHANNEL_AXIS, name='concat32')([block_conv_32, block_context_conv_32])
    block_expan_conv_32 = conv_bn_leakyrelu_repetition_block(filters=(32 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block32')(block_concat_32)
    # --- Decoder: upsample + skip connections from the image-stream encoder only ---
    block_up_16 = UpSampling2D(size=(2, 2), name='up_block16')(block_expan_conv_32)
    block_concat_16 = Concatenate(axis=CHANNEL_AXIS, name='concat16')([block_up_16, block_conv_16])
    block_expan_conv_16 = conv_bn_leakyrelu_repetition_block(filters=(16 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block16')(block_concat_16)
    block_up_8 = UpSampling2D(size=(2, 2), name='up_block8')(block_expan_conv_16)
    block_concat_8 = Concatenate(axis=CHANNEL_AXIS, name='concat8')([block_up_8, block_conv_8])
    block_expan_conv_8 = conv_bn_leakyrelu_repetition_block(filters=(8 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block8')(block_concat_8)
    block_up_4 = UpSampling2D(size=(2, 2), name='up_block4')(block_expan_conv_8)
    block_concat_4 = Concatenate(axis=CHANNEL_AXIS, name='concat4')([block_up_4, block_conv_4])
    block_expan_conv_4 = conv_bn_leakyrelu_repetition_block(filters=(4 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block4')(block_concat_4)
    block_up_2 = UpSampling2D(size=(2, 2), name='up_block2')(block_expan_conv_4)
    block_concat_2 = Concatenate(axis=CHANNEL_AXIS, name='concat2')([block_up_2, block_conv_2])
    block_expan_conv_2 = conv_bn_leakyrelu_repetition_block(filters=(2 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block2')(block_concat_2)
    block_up_1 = UpSampling2D(size=(2, 2), name='up_block1')(block_expan_conv_2)
    block_concat_1 = Concatenate(axis=CHANNEL_AXIS, name='concat1')([block_up_1, block_conv_1])
    block_expan_conv_1 = conv_bn_leakyrelu_repetition_block(filters=(1 * base_channel), kernel_size=(3, 3), repetitions=2, first_layer_down_size=False, alpha=0.1, name='expan_conv_block1')(block_concat_1)
    # --- Deep supervision: 1x1-conv heads at 1/4, 1/2 and full resolution ---
    block_seg_4 = Conv2D(filters=num_outputs, kernel_size=(1, 1), strides=(1, 1), padding='same', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name='seg_block4')(block_expan_conv_4)
    block_seg_2 = Conv2D(filters=num_outputs, kernel_size=(1, 1), strides=(1, 1), padding='same', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name='seg_block2')(block_expan_conv_2)
    block_seg_1 = Conv2D(filters=num_outputs, kernel_size=(1, 1), strides=(1, 1), padding='same', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='he_normal', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name='seg_block1')(block_expan_conv_1)
    # Sum the heads coarse-to-fine: upsample the 1/4 head, add the 1/2 head,
    # upsample again, add the full-resolution head.
    block_seg_up_2 = UpSampling2D(size=(2, 2), name='seg_up_block2')(block_seg_4)
    block_add_2 = Add(name='add_block2')([block_seg_up_2, block_seg_2])
    block_seg_up_1 = UpSampling2D(size=(2, 2), name='seg_up_block1')(block_add_2)
    prediction = Add(name='prediction')([block_seg_up_1, block_seg_1])
    # Flatten spatial dims so softmax normalizes over the class channels,
    # then restore the (rows, cols, classes) layout.
    reshape1 = Reshape(((input_shape[0] * input_shape[1]), (input_shape[2] * num_outputs)), name='reshape1')(prediction)
    prediction_softmax = Activation('softmax', name='softmax')(reshape1)
    reshape2 = Reshape((input_shape[0], input_shape[1], (input_shape[2] * num_outputs)), name='output')(prediction_softmax)
    model = Model(inputs=[input_img0, input_img1, input_mask0], outputs=reshape2)
    return model
def train_lv_net():
    """Train the LV segmentation network (LV-net) on the UK Biobank data.

    Builds the two-input propagation model (image pair + known mask of
    frame 0 -> mask of frame 1), then runs a manual ``train_on_batch``
    loop driven by four path-list generators that share one random seed
    so geometric augmentations stay aligned between images and masks.
    The learning rate is decayed every 10 epochs and weights are
    checkpointed every 5 epochs under ``LV_Segmentation/``.
    """
    code_path = config.code_root
    initial_lr = config.lv_net_initial_lr
    decay_rate = config.lv_net_decay_rate
    batch_size = config.lv_net_batch_size
    input_img_size = config.lv_net_imput_img_size
    epochs = config.lv_net_epochs

    # Set current_epoch > 0 to resume training from a saved checkpoint.
    current_epoch = 0
    new_start_epoch = current_epoch

    model = net_module(input_shape=(input_img_size, input_img_size, 1),
                       num_outputs=3)
    if current_epoch == 0:
        print('Building model')
    else:
        print('Loading model')
        model.load_weights(filepath=os.path.join(
            code_path, 'LV_Segmentation',
            'model_lv_net_epoch{}.h5'.format(str(current_epoch).zfill(3))))
    model.compile(optimizer=Adam(lr=initial_lr),
                  loss=dice_coef6_loss,
                  metrics=[dice_coef6, dice_coef5_0, dice_coef5_1, dice_coef5_2])
    print('This model has {} parameters'.format(model.count_params()))

    # NOTE(review): this expects the 8-tuple variant of ukbiobank_data()
    # (paired frame-0 / frame-1 lists) -- confirm which definition is in
    # scope in the module this function originally lives in.
    (train_img_list0, train_img_list1, train_gt_list0, train_gt_list1,
     test_img_list0, test_img_list1, test_gt_list0, test_gt_list1) = ukbiobank_data()
    training_sample = len(train_img_list0)

    img_data_gen_args = dict(featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False,
                             zca_epsilon=1e-06,
                             rotation_range=180.0,
                             width_shift_range=0.15,
                             height_shift_range=0.15,
                             shear_range=0.0,
                             zoom_range=0.15,
                             channel_shift_range=0.0,
                             fill_mode='constant',
                             cval=0.0,
                             horizontal_flip=True,
                             vertical_flip=True,
                             rescale=None,
                             preprocessing_function=mean_variance_normalization5,
                             data_format=K.image_data_format())
    # Masks share the geometric augmentation but use a label-preserving
    # intensity transform instead of mean/variance normalization.
    mask_data_gen_args = copy.deepcopy(img_data_gen_args)
    mask_data_gen_args['preprocessing_function'] = elementwise_multiplication2

    # Keyword arguments shared by every flow_from_path_list call; the
    # per-split seed keeps the four generators of a split synchronized.
    flow_args = dict(target_size=(input_img_size, input_img_size),
                     pad_to_square=True,
                     resize_mode='nearest',
                     histogram_based_preprocessing=False,
                     clahe=False,
                     color_mode='grayscale',
                     class_list=None,
                     class_mode=None,
                     batch_size=batch_size,
                     shuffle=True,
                     save_to_dir=None,
                     save_prefix='',
                     save_format='png',
                     follow_links=False)

    print('Creating generators for training')
    image_datagen0 = ImageDataGenerator2(**img_data_gen_args)
    image_datagen1 = ImageDataGenerator2(**img_data_gen_args)
    mask_datagen0 = ImageDataGenerator2(**mask_data_gen_args)
    mask_datagen1 = ImageDataGenerator2(**mask_data_gen_args)
    seed = 1
    for datagen in (image_datagen0, image_datagen1, mask_datagen0, mask_datagen1):
        datagen.fit(np.zeros((1, 1, 1, 1)), augment=False, rounds=0, seed=seed)
    image_generator0 = image_datagen0.flow_from_path_list(
        path_list=train_img_list0, seed=seed, save_period=500, **flow_args)
    image_generator1 = image_datagen1.flow_from_path_list(
        path_list=train_img_list1, seed=seed, save_period=500, **flow_args)
    mask_generator0 = mask_datagen0.flow_from_path_list(
        path_list=train_gt_list0, seed=seed, save_period=500, **flow_args)
    mask_generator1 = mask_datagen1.flow_from_path_list(
        path_list=train_gt_list1, seed=seed, save_period=500, **flow_args)
    train_generator = izip(image_generator0, image_generator1,
                           mask_generator0, mask_generator1)

    print('Creating generators for validation')
    val_image_datagen0 = ImageDataGenerator2(**img_data_gen_args)
    val_image_datagen1 = ImageDataGenerator2(**img_data_gen_args)
    val_mask_datagen0 = ImageDataGenerator2(**mask_data_gen_args)
    val_mask_datagen1 = ImageDataGenerator2(**mask_data_gen_args)
    val_seed = 2
    for datagen in (val_image_datagen0, val_image_datagen1,
                    val_mask_datagen0, val_mask_datagen1):
        datagen.fit(np.zeros((1, 1, 1, 1)), augment=False, rounds=0, seed=val_seed)
    val_image_generator0 = val_image_datagen0.flow_from_path_list(
        path_list=test_img_list0, seed=val_seed, save_period=1, **flow_args)
    val_image_generator1 = val_image_datagen1.flow_from_path_list(
        path_list=test_img_list1, seed=val_seed, save_period=1, **flow_args)
    val_mask_generator0 = val_mask_datagen0.flow_from_path_list(
        path_list=test_gt_list0, seed=val_seed, save_period=1, **flow_args)
    val_mask_generator1 = val_mask_datagen1.flow_from_path_list(
        path_list=test_gt_list1, seed=val_seed, save_period=1, **flow_args)
    validation_generator = izip(val_image_generator0, val_image_generator1,
                                val_mask_generator0, val_mask_generator1)

    print('Start training')
    steps = int(math.ceil(float(training_sample) / batch_size))
    print('There will be {} epochs with {} steps in each epoch'.format(epochs, steps))
    total_step = 0
    for epoch in range(new_start_epoch + 1, new_start_epoch + epochs + 1):
        print('\n\n##########\nEpoch {}\n##########'.format(epoch))
        for step in range(steps):
            print('\n****** Epoch {} Step {} ******'.format(epoch, step))
            (batch_img0, batch_img1, batch_mask0, batch_mask1) = next(train_generator)
            # Inputs: both frames plus the known mask of frame 0; the
            # target is the mask of frame 1.
            print(model.train_on_batch([batch_img0, batch_img1, batch_mask0],
                                       batch_mask1,
                                       sample_weight=None, class_weight=None))
            # Periodically evaluate on one validation batch.
            if (total_step % 500) == 0:
                (val_batch_img0, val_batch_img1,
                 val_batch_mask0, val_batch_mask1) = next(validation_generator)
                print('test:')
                print(model.test_on_batch(
                    [val_batch_img0, val_batch_img1, val_batch_mask0],
                    val_batch_mask1, sample_weight=None))
            total_step += 1
        # Step-wise learning-rate decay every 10 epochs.
        if (epoch % 10) == 0:
            new_lr = initial_lr * (decay_rate ** (epoch // 10))
            K.set_value(model.optimizer.lr, new_lr)
            print('learning rate is reset to %.8f' % new_lr)
        # Checkpoint every 5 epochs.
        if (epoch % 5) == 0:
            model.save_weights(os.path.join(
                code_path, 'LV_Segmentation',
                'model_lv_net_epoch{}.h5'.format(str(epoch).zfill(3))))
    print('Training is done!')
def _lvm_surrounds_lvc(gt_data0):
    """Return True when every LV-cavity pixel (label 50) has all four
    4-neighbours inside the LV (cavity 50 or myocardium 100), i.e. the
    myocardium closes around the cavity. Border cavity pixels count as
    not surrounded."""
    (shape_r, shape_c) = gt_data0.shape
    for r in range(shape_r):
        for c in range(shape_c):
            if gt_data0[(r, c)] == 50:
                up_ok = (r != 0) and (gt_data0[((r - 1), c)] in [50, 100])
                down_ok = (r != (shape_r - 1)) and (gt_data0[((r + 1), c)] in [50, 100])
                left_ok = (c != 0) and (gt_data0[(r, (c - 1))] in [50, 100])
                right_ok = (c != (shape_c - 1)) and (gt_data0[(r, (c + 1))] in [50, 100])
                if not (up_ok and down_ok and left_ok and right_ok):
                    return False
    return True


def _find_base_slice(crop_2D_path, slices, t):
    """Search for the basal slice at instant ``t``, scanning from the
    mid-stack slice down to slice 0 on the cropped ground truth.

    A slice is declared basal when it stops looking like a good
    mid-ventricular segmentation: poor RV (label 150) overlap with the
    slice above, missing LV cavity (50) or myocardium (100), or the
    myocardium no longer surrounding the cavity.

    Returns (base_slice, keep_base_lv); (-1, False) when every tested
    slice passes all the checks.
    """
    for s in range(int(round((slices * 0.5) + 0.001)), -1, -1):
        gt_file0 = os.path.join(crop_2D_path, 'crop_2D_gt_{}_{}.png'.format(
            str(s).zfill(2), str(t).zfill(2)))
        gt_file1 = os.path.join(crop_2D_path, 'crop_2D_gt_{}_{}.png'.format(
            str(s + 1).zfill(2), str(t).zfill(2)))
        gt_data0 = np.array(Image.open(gt_file0))
        gt_data1 = np.array(Image.open(gt_file1))
        # RV masks of the slice and the slice above it.
        rv0 = np.where((gt_data0 == 150), np.ones_like(gt_data0), np.zeros_like(gt_data0))
        rv1 = np.where((gt_data1 == 150), np.ones_like(gt_data1), np.zeros_like(gt_data1))
        rv0_rv1_intersection = rv0 * rv1
        good_rv_ratio = (((float(np.sum(rv0_rv1_intersection)) / np.sum(rv1)) > 0.75) or
                         ((float(np.sum(rv0)) / np.sum(rv1)) > 0.8))
        has_lvc0 = (50 in gt_data0)
        has_lvm0 = (100 in gt_data0)
        if not (good_rv_ratio and has_lvc0 and has_lvm0 and
                _lvm_surrounds_lvc(gt_data0)):
            return (s, True)
    return (-1, False)


def _adapt_gt_slice(gt_file, new_gt_file, s, base_slice, keep_base_lv):
    """Write the adapted ground truth for one slice.

    Slices below the base are blanked; the base slice itself keeps only
    its LV labels (cavity 50 and myocardium 100) when ``keep_base_lv``;
    every slice above the base is copied unchanged.
    """
    import shutil
    gt0 = Image.open(gt_file)
    (c, r) = gt0.size
    if (s < base_slice) or ((s == base_slice) and (not keep_base_lv)):
        # Below the base: the segmentation is considered unreliable, blank it.
        Image.fromarray(np.zeros((r, c)).astype('uint8')).save(new_gt_file)
    elif (s == base_slice) and keep_base_lv:
        # Base slice: keep only the LV cavity/myocardium labels.
        gt_data0 = np.array(gt0)
        lv0 = np.where(np.logical_or((gt_data0 == 50), (gt_data0 == 100)),
                       np.ones_like(gt_data0), np.zeros_like(gt_data0))
        Image.fromarray((lv0 * gt_data0).astype('uint8')).save(new_gt_file)
    else:
        # shutil.copyfile instead of shelling out to `cp`: portable and
        # safe for paths containing spaces or shell metacharacters.
        shutil.copyfile(gt_file, new_gt_file)


def adapt_ground_truth(adapt_original=True):
    """Detect the basal slice of each usable case and adapt its ground truth.

    Cases are read from ``Preprocessing/statistics_record.txt`` and filtered
    against the doubtful-case list and quality flags. For each ED/ES instant
    the basal slice is detected heuristically on the cropped ground truth,
    then the per-slice ground-truth images are rewritten (blanked / LV-only /
    copied) next to the originals with a ``gt2`` infix.

    Parameters
    ----------
    adapt_original : bool
        If True, adapt the images under ``original_2D`` and record the
        detected base slices in ``Preprocessing/base_slices.txt``;
        otherwise adapt the images under ``crop_2D``.
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing',
                                      'doubtful_segmentation_cases2.txt')
    if adapt_original:
        base_slices_file = os.path.join(code_dir, 'Preprocessing', 'base_slices.txt')
        base_slices = open(base_slices_file, 'w')

    with open(statistics_file) as s_file:
        statistics = s_file.readlines()
    statistics = [x.strip() for x in statistics]
    # Each row: integer fields followed by four trailing float fields.
    statistics = [([int(z) for z in y.split()[:(-4)]] +
                   [float(z) for z in y.split()[(-4):]]) for y in statistics]

    with open(doubtful_case_file) as d_file:
        doubtful_cases = d_file.readlines()
    doubtful_cases = [int(x.strip()) for x in doubtful_cases]

    # Keep cases that are not doubtful, pass both quality flags, and have
    # two valid ED/ES instants.
    used_statistics = [k for k in statistics
                       if ((k[0] not in doubtful_cases) and (k[1] == 1) and
                           (k[2] == 1) and (k[7] >= 0) and (k[8] >= 0))]
    print('There will be {} used eids'.format(len(used_statistics)))

    for k in used_statistics:
        eid = k[0]
        slices = k[5]
        ed_es_instant0 = k[7]
        ed_es_instant1 = k[8]
        print(eid)
        crop_2D_path = os.path.join(data_dir, str(eid), 'crop_2D')
        original_2D_path = os.path.join(data_dir, str(eid), 'original_2D')
        used_instants = []
        if ed_es_instant0 >= 0:
            used_instants += [ed_es_instant0]
        if ed_es_instant1 >= 0:
            used_instants += [ed_es_instant1]
        written = str(eid)
        for t in used_instants:
            # The base is always detected on the cropped ground truth,
            # whichever set of images is adapted afterwards.
            (base_slice, keep_base_lv) = _find_base_slice(crop_2D_path, slices, t)
            print('base_slice = {} keep_base_lv = {}'.format(base_slice, keep_base_lv))
            written += ' {}'.format(base_slice)
            for s in range(slices):
                if adapt_original:
                    gt_file0 = os.path.join(
                        original_2D_path, 'original_gt_2D_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2)))
                    new_gt_file0 = os.path.join(
                        original_2D_path, 'original_gt2_2D_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2)))
                else:
                    gt_file0 = os.path.join(
                        crop_2D_path, 'crop_2D_gt_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2)))
                    new_gt_file0 = os.path.join(
                        crop_2D_path, 'crop_2D_gt2_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2)))
                _adapt_gt_slice(gt_file0, new_gt_file0, s, base_slice, keep_base_lv)
        written += '\n'
        if adapt_original:
            # One line per eid: "eid base_slice_instant0 base_slice_instant1".
            base_slices.write(written)
    if adapt_original:
        base_slices.close()
def ukbiobank_data():
    """Build train/test image and ground-truth path lists from UK Biobank.

    NOTE(review): a second ``ukbiobank_data`` is defined later in this file
    and shadows this one at import time; confirm which variant is intended.

    Cases are read from ``Preprocessing/statistics_record.txt``, filtered by
    the doubtful-case list and quality flags, and split into train/test by
    ``eid % 5`` (remainder 2 -> test). For each case only the first ED/ES
    instant is used and slices in the 20%-60% depth range are kept.

    Returns
    -------
    tuple
        (train_img_list, train_gt_list, test_img_list, test_gt_list)
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing',
                                      'doubtful_segmentation_cases2.txt')
    with open(statistics_file) as s_file:
        statistics = s_file.readlines()
    statistics = [x.strip() for x in statistics]
    # Each row: integer fields followed by four trailing float fields.
    statistics = [([int(z) for z in y.split()[:(-4)]] +
                   [float(z) for z in y.split()[(-4):]]) for y in statistics]
    with open(doubtful_case_file) as d_file:
        doubtful_cases = d_file.readlines()
    doubtful_cases = [int(x.strip()) for x in doubtful_cases]
    used_statistics = [k for k in statistics
                       if ((k[0] not in doubtful_cases) and (k[1] == 1) and
                           (k[2] == 1) and (k[7] >= 0) and (k[8] >= 0))]
    print('There will be {} used eids'.format(len(used_statistics)))
    train_statistics = [x for x in used_statistics if ((x[0] % 5) != 2)]
    test_statistics = [x for x in used_statistics if ((x[0] % 5) == 2)]

    def collect(stat_rows, img_list, gt_list):
        # Append the image/gt paths of one split in place; returns the
        # number of subjects visited.
        count = 0
        for k in stat_rows:
            eid = k[0]
            slices = k[5]
            ed_es_instant0 = k[7]
            count += 1
            original_2D_path = os.path.join(data_dir, str(eid), 'original_2D')
            used_instants = []
            if ed_es_instant0 >= 0:
                used_instants += [ed_es_instant0]
            for t in used_instants:
                # Mid-stack slices only: [20%, 60%) of the slice count.
                for s in range(int(round((slices * 0.2) + 0.001)),
                               int(round((slices * 0.6) + 0.001))):
                    img_list.append(os.path.join(
                        original_2D_path, 'original_2D_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2))))
                    gt_list.append(os.path.join(
                        original_2D_path, 'original_gt2_2D_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2))))
        return count

    train_img_list = []
    train_gt_list = []
    test_img_list = []
    test_gt_list = []
    train_subject_count = collect(train_statistics, train_img_list, train_gt_list)
    test_subject_count = collect(test_statistics, test_img_list, test_gt_list)
    print('train_subject_count = {}'.format(train_subject_count))
    print('test_subject_count = {}'.format(test_subject_count))
    print('train_image_count = {}'.format(len(train_img_list)))
    print('test_image_count = {}'.format(len(test_img_list)))
    return (train_img_list, train_gt_list, test_img_list, test_gt_list)
def ukbiobank_data():
    """Build train/test path lists starting above each case's basal slice.

    Variant of the path-list builder that reads the basal-slice indices
    recorded in ``Preprocessing/base_slices.txt`` (by ``adapt_ground_truth``)
    and keeps, per instant, the 40% of slices immediately above the base.
    This definition shadows the earlier ``ukbiobank_data`` in this file.

    Returns
    -------
    tuple
        (train_img_list, train_gt_list, test_img_list, test_gt_list)
    """
    data_dir = config.data_root
    code_dir = config.code_root
    statistics_file = os.path.join(code_dir, 'Preprocessing', 'statistics_record.txt')
    doubtful_case_file = os.path.join(code_dir, 'Preprocessing',
                                      'doubtful_segmentation_cases2.txt')
    base_slices_file = os.path.join(code_dir, 'Preprocessing', 'base_slices.txt')
    with open(base_slices_file) as b_file:
        base_slices = b_file.readlines()
    base_slices = [x.strip() for x in base_slices]
    base_slices = [[int(z) for z in y.split()] for y in base_slices]
    # eid -> per-instant base-slice indices. O(1) lookup instead of the
    # previous linear scan per case (assumes one row per eid; a missing
    # eid now raises KeyError instead of IndexError).
    base_slice_map = {row[0]: row[1:] for row in base_slices}
    with open(statistics_file) as s_file:
        statistics = s_file.readlines()
    statistics = [x.strip() for x in statistics]
    statistics = [([int(z) for z in y.split()[:(-4)]] +
                   [float(z) for z in y.split()[(-4):]]) for y in statistics]
    with open(doubtful_case_file) as d_file:
        doubtful_cases = d_file.readlines()
    doubtful_cases = [int(x.strip()) for x in doubtful_cases]
    used_statistics = [k for k in statistics
                       if ((k[0] not in doubtful_cases) and (k[1] == 1) and
                           (k[2] == 1) and (k[7] >= 0) and (k[8] >= 0))]
    print('There will be {} used eids'.format(len(used_statistics)))
    train_statistics = [x for x in used_statistics if ((x[0] % 5) != 2)]
    test_statistics = [x for x in used_statistics if ((x[0] % 5) == 2)]

    def collect(stat_rows, img_list, gt_list):
        # Append the image/gt paths of one split in place; returns the
        # number of subjects visited.
        count = 0
        for k in stat_rows:
            eid = k[0]
            slices = k[5]
            ed_es_instant0 = k[7]
            base_slice_list = base_slice_map[eid]
            count += 1
            original_2D_path = os.path.join(data_dir, str(eid), 'original_2D')
            used_instants = []
            if ed_es_instant0 >= 0:
                used_instants += [ed_es_instant0]
            for (idx, t) in enumerate(used_instants):
                # Start just above the detected base (clamped at 0) and
                # keep 40% of the stack.
                base_slice_t = base_slice_list[idx]
                first_slice_idx = max(base_slice_t + 1, 0)
                for s in range(first_slice_idx,
                               first_slice_idx + int(round((slices * 0.4) + 0.001))):
                    img_list.append(os.path.join(
                        original_2D_path, 'original_2D_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2))))
                    gt_list.append(os.path.join(
                        original_2D_path, 'original_gt2_2D_{}_{}.png'.format(
                            str(s).zfill(2), str(t).zfill(2))))
        return count

    train_img_list = []
    train_gt_list = []
    test_img_list = []
    test_gt_list = []
    train_subject_count = collect(train_statistics, train_img_list, train_gt_list)
    test_subject_count = collect(test_statistics, test_img_list, test_gt_list)
    print('train_subject_count = {}'.format(train_subject_count))
    print('test_subject_count = {}'.format(test_subject_count))
    print('train_image_count = {}'.format(len(train_img_list)))
    print('test_image_count = {}'.format(len(test_img_list)))
    return (train_img_list, train_gt_list, test_img_list, test_gt_list)
def net_module(input_shape, num_outputs):
    """Builds a U-net-like architecture with a deep-supervision output pyramid.

    Six contracting scales (channel multipliers 1..32 on a base of 24),
    five expanding scales with skip concatenations, 1x1 segmentation heads
    at scales 4/2/1 summed through an upsampling pyramid, and a final
    sigmoid activation.

    Args:
        input_shape: The input shape in the form (nb_rows, nb_cols, nb_channels)
        num_outputs: The number of channels of the final sigmoid output
    Returns:
        The keras `Model`.
    """
    CHANNEL_AXIS = 3
    handle_dim_ordering()
    if len(input_shape) != 3:
        raise Exception('Input shape should be a tuple (nb_rows, nb_cols, nb_channels)')
    # Permute to channels-first when the backend is not TensorFlow-ordered.
    if K.image_dim_ordering() != 'tf':
        input_shape = (input_shape[2], input_shape[0], input_shape[1])
    net_input = Input(shape=input_shape, name='input')
    base_channel = 24

    # Contracting path: conv block per scale, 2x2 max-pool between scales.
    # Layer names match the original hand-rolled version ('conv_block<m>',
    # 'pool_block<m>'). Omitted Conv2D/MaxPooling2D kwargs are Keras defaults.
    down_blocks = {}
    x = net_input
    for (i, m) in enumerate([1, 2, 4, 8, 16, 32]):
        if i > 0:
            x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                             name='pool_block{}'.format(m))(x)
        x = conv_bn_leakyrelu_repetition_block(
            filters=(m * base_channel), kernel_size=(3, 3), repetitions=2,
            first_layer_down_size=False, alpha=0.1,
            name='conv_block{}'.format(m))(x)
        down_blocks[m] = x

    # Expanding path: upsample, concatenate the skip connection, conv block.
    expan_blocks = {}
    for m in [16, 8, 4, 2, 1]:
        up = UpSampling2D(size=(2, 2), name='up_block{}'.format(m))(x)
        merged = Concatenate(axis=CHANNEL_AXIS,
                             name='concat{}'.format(m))([up, down_blocks[m]])
        x = conv_bn_leakyrelu_repetition_block(
            filters=(m * base_channel), kernel_size=(3, 3), repetitions=2,
            first_layer_down_size=False, alpha=0.1,
            name='expan_conv_block{}'.format(m))(merged)
        expan_blocks[m] = x

    # 1x1 segmentation heads at scales 4, 2 and 1 (deep supervision).
    seg = {}
    for m in [4, 2, 1]:
        seg[m] = Conv2D(filters=num_outputs, kernel_size=(1, 1), padding='same',
                        kernel_initializer='he_normal',
                        name='seg_block{}'.format(m))(expan_blocks[m])

    # Sum the heads through an upsampling pyramid down to full resolution.
    block_seg_up_2 = UpSampling2D(size=(2, 2), name='seg_up_block2')(seg[4])
    block_add_2 = Add(name='add_block2')([block_seg_up_2, seg[2]])
    block_seg_up_1 = UpSampling2D(size=(2, 2), name='seg_up_block1')(block_add_2)
    prediction = Add(name='prediction')([block_seg_up_1, seg[1]])
    output = Activation('sigmoid', name='output')(prediction)
    model = Model(inputs=net_input, outputs=output)
    return model
def train_roi_net():
    """Train the ROI localization network (ROI-net) on the UK Biobank data.

    Builds the single-input/single-output net (sigmoid, 1 channel), then
    runs a manual ``train_on_batch`` loop with image/mask generators that
    share a random seed so augmentations stay aligned. Learning rate is
    decayed every 10 epochs and weights are checkpointed every 5 epochs
    under ``ROI/``.
    """
    code_path = config.code_root
    initial_lr = config.roi_net_initial_lr
    decay_rate = config.roi_net_decay_rate
    batch_size = config.roi_net_batch_size
    input_img_size = config.roi_net_imput_img_size
    epochs = config.roi_net_epochs

    # Set current_epoch > 0 to resume training from a saved checkpoint.
    current_epoch = 0
    new_start_epoch = current_epoch

    model = net_module(input_shape=(input_img_size, input_img_size, 1),
                       num_outputs=1)
    if current_epoch == 0:
        print('Building model')
    else:
        print('Loading model')
        model.load_weights(filepath=os.path.join(
            code_path, 'ROI',
            'model_roi_net_epoch{}.h5'.format(str(current_epoch).zfill(3))))
    model.compile(optimizer=Adam(lr=initial_lr),
                  loss=dice_coef2_loss,
                  metrics=[dice_coef2])
    print('This model has {} parameters'.format(model.count_params()))

    (train_img_list, train_gt_list, test_img_list, test_gt_list) = ukbiobank_data()
    training_sample = len(train_img_list)

    img_data_gen_args = dict(featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False,
                             zca_epsilon=1e-06,
                             rotation_range=180.0,
                             width_shift_range=0.05,
                             height_shift_range=0.05,
                             shear_range=0.0,
                             zoom_range=0.05,
                             channel_shift_range=0.0,
                             fill_mode='constant',
                             cval=0.0,
                             horizontal_flip=True,
                             vertical_flip=True,
                             rescale=None,
                             preprocessing_function=mean_variance_normalization5,
                             data_format=K.image_data_format())
    # Masks share the geometric augmentation but use a label-preserving
    # intensity transform instead of mean/variance normalization.
    mask_data_gen_args = copy.deepcopy(img_data_gen_args)
    mask_data_gen_args['preprocessing_function'] = elementwise_multiplication

    # Keyword arguments common to every flow; histogram preprocessing /
    # CLAHE are applied to images only, so they stay per-call.
    flow_args = dict(target_size=(input_img_size, input_img_size),
                     pad_to_square=True,
                     resize_mode='nearest',
                     color_mode='grayscale',
                     class_list=None,
                     class_mode=None,
                     batch_size=batch_size,
                     shuffle=True,
                     save_to_dir=None,
                     save_prefix='',
                     save_format='png',
                     follow_links=False)

    print('Creating generators for training')
    image_datagen = ImageDataGenerator2(**img_data_gen_args)
    mask_datagen = ImageDataGenerator2(**mask_data_gen_args)
    seed = 1
    image_datagen.fit(np.zeros((1, 1, 1, 1)), augment=False, rounds=0, seed=seed)
    mask_datagen.fit(np.zeros((1, 1, 1, 1)), augment=False, rounds=0, seed=seed)
    image_generator = image_datagen.flow_from_path_list(
        path_list=train_img_list, histogram_based_preprocessing=True, clahe=True,
        seed=seed, save_period=500, **flow_args)
    mask_generator = mask_datagen.flow_from_path_list(
        path_list=train_gt_list, histogram_based_preprocessing=False, clahe=False,
        seed=seed, save_period=500, **flow_args)
    train_generator = izip(image_generator, mask_generator)

    print('Creating generators for validation')
    val_image_datagen = ImageDataGenerator2(**img_data_gen_args)
    val_mask_datagen = ImageDataGenerator2(**mask_data_gen_args)
    val_seed = 2
    val_image_datagen.fit(np.zeros((1, 1, 1, 1)), augment=False, rounds=0, seed=val_seed)
    val_mask_datagen.fit(np.zeros((1, 1, 1, 1)), augment=False, rounds=0, seed=val_seed)
    val_image_generator = val_image_datagen.flow_from_path_list(
        path_list=test_img_list, histogram_based_preprocessing=True, clahe=True,
        seed=val_seed, save_period=1, **flow_args)
    val_mask_generator = val_mask_datagen.flow_from_path_list(
        path_list=test_gt_list, histogram_based_preprocessing=False, clahe=False,
        seed=val_seed, save_period=1, **flow_args)
    validation_generator = izip(val_image_generator, val_mask_generator)

    print('Start training')
    steps = int(math.ceil(float(training_sample) / batch_size))
    print('There will be {} epochs with {} steps in each epoch'.format(epochs, steps))
    total_step = 0
    for epoch in range(new_start_epoch + 1, new_start_epoch + epochs + 1):
        print('\n\n##########\nEpoch {}\n##########'.format(epoch))
        for step in range(steps):
            print('\n****** Epoch {} Step {} ******'.format(epoch, step))
            (batch_img, batch_mask) = next(train_generator)
            print(model.train_on_batch(batch_img, batch_mask,
                                       sample_weight=None, class_weight=None))
            # Periodically evaluate on one validation batch.
            if (total_step % 500) == 0:
                (val_batch_img, val_batch_mask) = next(validation_generator)
                print('test:')
                print(model.test_on_batch(val_batch_img, val_batch_mask,
                                          sample_weight=None))
            total_step += 1
        # Step-wise learning-rate decay every 10 epochs.
        if (epoch % 10) == 0:
            new_lr = initial_lr * (decay_rate ** (epoch // 10))
            K.set_value(model.optimizer.lr, new_lr)
            print('learning rate is reset to %.8f' % new_lr)
        # Checkpoint every 5 epochs.
        if (epoch % 5) == 0:
            model.save_weights(os.path.join(
                code_path, 'ROI',
                'model_roi_net_epoch{}.h5'.format(str(epoch).zfill(3))))
    print('Training is done!')
def download_weights():
    """Download the three pretrained network weight files from the
    authors' server into their expected locations under the code root."""
    # Py2/Py3 compatibility: urlretrieve lives in different modules.
    if sys.version_info >= (3, 0):
        import urllib.request as urltool
    else:
        import urllib as urltool
    code_dir = config.code_root
    downloads = (
        ('Downloading pretrained ROI-net',
         'http://www-sop.inria.fr/members/Qiao.Zheng/CardiacSegmentationPropagation/ROI/model_roi_net_epoch050.h5',
         ('ROI', 'model_roi_net_epoch050.h5')),
        ('Downloading pretrained LVRV-net',
         'http://www-sop.inria.fr/members/Qiao.Zheng/CardiacSegmentationPropagation/LVRV_Segmentation/model_lvrv_net_epoch080.h5',
         ('LVRV_Segmentation', 'model_lvrv_net_epoch080.h5')),
        ('Downloading pretrained LV-net',
         'http://www-sop.inria.fr/members/Qiao.Zheng/CardiacSegmentationPropagation/LV_Segmentation/model_lv_net_epoch080.h5',
         ('LV_Segmentation', 'model_lv_net_epoch080.h5')),
    )
    for (message, source_url, dest_parts) in downloads:
        print(message)
        destination = os.path.join(code_dir, dest_parts[0], dest_parts[1])
        urltool.urlretrieve(source_url, destination)
def PSNR(gt, img):
    """Peak signal-to-noise ratio between a ground-truth image and a
    reconstruction, assuming an 8-bit intensity range (peak value 255).

    Parameters
    ----------
    gt, img : array_like
        Arrays of the same shape; float dtype recommended to avoid
        wraparound in the difference.

    Returns
    -------
    float
        PSNR in decibels; ``inf`` when the images are identical.
    """
    mse = np.mean(np.square(gt - img))
    # Guard the mse == 0 case explicitly instead of relying on
    # np.log10(0) emitting a RuntimeWarning and -inf.
    if mse == 0:
        return float('inf')
    return (20 * np.log10(255)) - (10 * np.log10(mse))
def loss_mse():
    """Build the masked N2V mean-squared-error loss.

    `y_true` carries the target and the blind-spot mask stacked along the
    channel axis; the loss is computed only over masked (manipulated) pixels
    and normalized by the number of masked pixels.
    """
    def n2v_mse(y_true, y_pred):
        channel_axis = len(y_true.shape) - 1
        target, mask = tf.split(y_true, 2, axis=channel_axis)
        masked_sq_err = K.square(target - (y_pred * mask))
        return tf.reduce_sum(masked_sq_err) / tf.reduce_sum(mask)
    return n2v_mse
def loss_mae():
    """Build the masked N2V mean-absolute-error loss.

    Same masking scheme as :func:`loss_mse`: `y_true` holds target and
    blind-spot mask stacked along the channel axis, and the absolute error
    is averaged over masked pixels only.
    """
    def n2v_abs(y_true, y_pred):
        channel_axis = len(y_true.shape) - 1
        target, mask = tf.split(y_true, 2, axis=channel_axis)
        masked_abs_err = K.abs(target - (y_pred * mask))
        return tf.reduce_sum(masked_abs_err) / tf.reduce_sum(mask)
    return n2v_abs
class N2VConfig(argparse.Namespace):
    """Default configuration for a N2V trainable CARE model.

    This class is meant to be used with :class:`N2V`.

    Parameters
    ----------
    X : array(float)
        The training data 'X', with dimensions 'SZYXC' or 'SYXC'.
        An empty array skips the data-derived setup (used when a config is
        re-loaded from JSON with attributes passed via ``kwargs``).
    kwargs : dict
        Overwrite (or add) configuration attributes (see below).

    Example
    -------
    >>> n2v_config = N2VConfig(X, unet_n_depth=3)

    Attributes
    ----------
    unet_residual : bool
        Parameter `residual` of :func:`csbdeep.nets.common_unet`. Default: ``False``
    unet_n_depth : int
        Parameter `n_depth` of :func:`csbdeep.nets.common_unet`. Default: ``2``
    unet_kern_size : int
        Parameter `kern_size` of :func:`csbdeep.nets.common_unet`. Default: ``5 if n_dim==2 else 3``
    unet_n_first : int
        Parameter `n_first` of :func:`csbdeep.nets.common_unet`. Default: ``32``
    batch_norm : bool
        Activate batch norm. Default: ``True``
    unet_last_activation : str
        Parameter `last_activation` of :func:`csbdeep.nets.common_unet`. Default: ``linear``
    train_loss : str
        Name of training loss ('mse' or 'mae'). Default: ``'mae'``
    train_epochs : int
        Number of training epochs. Default: ``100``
    train_steps_per_epoch : int
        Number of parameter update steps per epoch. Default: ``400``
    train_learning_rate : float
        Learning rate for training. Default: ``0.0004``
    train_batch_size : int
        Batch size for training. Default: ``16``
    train_tensorboard : bool
        Enable TensorBoard for monitoring training progress. Default: ``True``
    train_checkpoint : str
        Name of checkpoint file for model weights (only best are saved);
        set to ``None`` to disable. Default: ``weights_best.h5``
    train_reduce_lr : dict
        Parameter :class:`dict` of ReduceLROnPlateau_ callback; set to
        ``None`` to disable. Default: ``{'factor': 0.5, 'patience': 10}``
    n2v_perc_pix : float
        Percentage of pixels to manipulate per patch. Default: ``1.5``
    n2v_patch_shape : tuple
        Random patches of this shape are extracted from the given training
        data. Default: ``(64, 64) if n_dim==2 else (64, 64, 64)``
    n2v_manipulator : str
        Noise2Void pixel value manipulator. Default: ``uniform_withCP``
    n2v_neighborhood_radius : int
        Neighborhood radius for n2v manipulator. Default: ``5``
    single_net_per_channel : bool
        Enabling this creates a unet for each channel and each channel will
        be treated independently. Note: this makes the network
        ``n_channel_in`` times larger. Default: ``True``
    structN2Vmask : [[int]]
        Masking kernel for StructN2V to hide pixels adjacent to main blind
        spot. Value 1 = 'hidden', Value 0 = 'non hidden'. Nested lists
        equivalent to ndarray. Must have odd length in each dimension
        (center pixel is blind spot). Default ``None`` implies normal N2V
        masking.
    blurpool : bool
        Use blurpool for maxpooling. Default: ``False``
    skip_skipone : bool
        Remove top-most skip-connection. Default: ``False``

    .. _ReduceLROnPlateau: https://keras.io/callbacks/#reducelronplateau
    """

    def __init__(self, X, **kwargs):
        # Data-derived setup (dimensionality, channels, normalization stats)
        # only happens when training data is actually supplied.
        if (X.size != 0):
            assert ((len(X.shape) == 4) or (len(X.shape) == 5)), "Only 'SZYXC' or 'SYXC' as dimensions is supported."
            n_dim = (len(X.shape) - 2)
            n_channel_in = X.shape[(- 1)]
            n_channel_out = n_channel_in
            # Per-channel mean/std over the whole training set; stored as
            # strings below so they survive a JSON round-trip.
            (means, stds) = ([], [])
            for i in range(n_channel_in):
                means.append(np.mean(X[(..., i)]))
                stds.append(np.std(X[(..., i)]))
            if (n_dim == 2):
                axes = 'SYXC'
            elif (n_dim == 3):
                axes = 'SZYXC'
            # Validate axes; the `cond or _raise(...)` idiom raises on failure.
            axes = axes_check_and_normalize(axes)
            ax = axes_dict(axes)
            ax = {a: (ax[a] is not None) for a in ax}
            ((ax['X'] and ax['Y']) or _raise(ValueError('lateral axes X and Y must be present.')))
            ((not (ax['Z'] and ax['T'])) or _raise(ValueError('using Z and T axes together not supported.')))
            (axes.startswith('S') or (not ax['S']) or _raise(ValueError('sample axis S must be first.')))
            axes = axes.replace('S', '')
            # Move/append the channel axis to wherever the Keras backend
            # expects it (last for TF, first for Theano-style backends).
            if backend_channels_last():
                if ax['C']:
                    ((axes[(- 1)] == 'C') or _raise(ValueError(('channel axis must be last for backend (%s).' % K.backend()))))
                else:
                    axes += 'C'
            elif ax['C']:
                ((axes[0] == 'C') or _raise(ValueError(('channel axis must be first for backend (%s).' % K.backend()))))
            else:
                axes = ('C' + axes)
            self.means = [str(el) for el in means]
            self.stds = [str(el) for el in stds]
            self.n_dim = n_dim
            self.axes = axes
            self.n_channel_in = int(n_channel_in)
            self.n_channel_out = int(n_channel_out)
            # --- network architecture defaults ---
            self.unet_residual = False
            self.unet_n_depth = 2
            self.unet_kern_size = (5 if (self.n_dim == 2) else 3)
            self.unet_n_first = 32
            self.unet_last_activation = 'linear'
            if backend_channels_last():
                self.unet_input_shape = ((self.n_dim * (None,)) + (self.n_channel_in,))
            else:
                self.unet_input_shape = ((self.n_channel_in,) + (self.n_dim * (None,)))
            # --- training defaults ---
            self.train_loss = 'mae'
            self.train_epochs = 100
            self.train_steps_per_epoch = 400
            self.train_learning_rate = 0.0004
            self.train_batch_size = 16
            self.train_tensorboard = True
            self.train_checkpoint = 'weights_best.h5'
            self.train_reduce_lr = {'factor': 0.5, 'patience': 10}
            self.batch_norm = True
            # --- N2V-specific defaults ---
            self.n2v_perc_pix = 1.5
            self.n2v_patch_shape = ((64, 64) if (self.n_dim == 2) else (64, 64, 64))
            self.n2v_manipulator = 'uniform_withCP'
            self.n2v_neighborhood_radius = 5
            self.single_net_per_channel = True
            self.blurpool = False
            self.skip_skipone = False
            # 'n_dim' is derived from X above, so a manual override via
            # kwargs is silently discarded here.
            try:
                del kwargs['n_dim']
            except:
                pass
        self.structN2Vmask = None
        self.probabilistic = False
        # Finally, overwrite (or add) attributes from kwargs — this is also
        # how a JSON-loaded config restores all its values.
        for k in kwargs:
            setattr(self, k, kwargs[k])

    def is_valid(self, return_invalid=False):
        """Check if configuration is valid.

        Parameters
        ----------
        return_invalid : bool
            If ``True``, also return the names of the invalid attributes.

        Returns
        -------
        bool
            Flag that indicates whether the current configuration values are
            valid (optionally paired with a tuple of offending attribute
            names).
        """
        # Small helper: integer with optional inclusive lower/upper bound.
        def _is_int(v, low=None, high=None):
            return (isinstance(v, int) and (True if (low is None) else (low <= v)) and (True if (high is None) else (v <= high)))
        # One boolean per attribute; collected so callers can see which
        # specific attributes failed.
        ok = {}
        ok['means'] = True
        for mean in self.means:
            ok['means'] &= np.isscalar(float(mean))
        ok['stds'] = True
        for std in self.stds:
            ok['stds'] &= (np.isscalar(float(std)) and (float(std) > 0.0))
        ok['n_dim'] = (self.n_dim in (2, 3))
        try:
            axes_check_and_normalize(self.axes, (self.n_dim + 1), disallowed='S')
            ok['axes'] = True
        except:
            ok['axes'] = False
        ok['n_channel_in'] = _is_int(self.n_channel_in, 1)
        ok['n_channel_out'] = _is_int(self.n_channel_out, 1)
        # residual nets require matching in/out channel counts
        ok['unet_residual'] = (isinstance(self.unet_residual, bool) and ((not self.unet_residual) or (self.n_channel_in == self.n_channel_out)))
        ok['unet_n_depth'] = _is_int(self.unet_n_depth, 1)
        ok['unet_kern_size'] = _is_int(self.unet_kern_size, 1)
        ok['unet_n_first'] = _is_int(self.unet_n_first, 1)
        ok['unet_last_activation'] = (self.unet_last_activation in ('linear', 'relu'))
        # Spatial dims must be None (variable) or divisible by 2**depth so
        # that down/upsampling round-trips cleanly.
        ok['unet_input_shape'] = (isinstance(self.unet_input_shape, (list, tuple)) and (len(self.unet_input_shape) == (self.n_dim + 1)) and (self.unet_input_shape[(- 1)] == self.n_channel_in) and all((((d is None) or (_is_int(d) and ((d % (2 ** self.unet_n_depth)) == 0))) for d in self.unet_input_shape[:(- 1)])))
        ok['train_loss'] = (self.train_loss in ('mse', 'mae'))
        ok['train_epochs'] = _is_int(self.train_epochs, 1)
        ok['train_steps_per_epoch'] = _is_int(self.train_steps_per_epoch, 1)
        ok['train_learning_rate'] = (np.isscalar(self.train_learning_rate) and (self.train_learning_rate > 0))
        ok['train_batch_size'] = _is_int(self.train_batch_size, 1)
        ok['train_tensorboard'] = isinstance(self.train_tensorboard, bool)
        ok['train_checkpoint'] = ((self.train_checkpoint is None) or isinstance(self.train_checkpoint, string_types))
        ok['train_reduce_lr'] = ((self.train_reduce_lr is None) or isinstance(self.train_reduce_lr, dict))
        ok['batch_norm'] = isinstance(self.batch_norm, bool)
        ok['n2v_perc_pix'] = ((self.n2v_perc_pix > 0) and (self.n2v_perc_pix <= 100))
        ok['n2v_patch_shape'] = (isinstance(self.n2v_patch_shape, (list, tuple)) and (len(self.n2v_patch_shape) == self.n_dim) and all(((d > 0) for d in self.n2v_patch_shape)))
        ok['n2v_manipulator'] = (self.n2v_manipulator in ['normal_withoutCP', 'uniform_withCP', 'uniform_withoutCP', 'normal_additive', 'normal_fitted', 'identity', 'mean', 'median'])
        ok['n2v_neighborhood_radius'] = _is_int(self.n2v_neighborhood_radius, 0)
        ok['single_net_per_channel'] = isinstance(self.single_net_per_channel, bool)
        ok['blurpool'] = isinstance(self.blurpool, bool)
        ok['skip_skipone'] = isinstance(self.skip_skipone, bool)
        if (self.structN2Vmask is None):
            ok['structN2Vmask'] = True
        else:
            # Mask must match dimensionality, have odd side lengths (so a
            # center blind-spot exists) and contain only 0/1 entries.
            mask = np.array(self.structN2Vmask)
            t1 = (mask.ndim == self.n_dim)
            t2 = all((((x % 2) == 1) for x in mask.shape))
            t3 = all([(x in [0, 1]) for x in mask.flat])
            ok['structN2Vmask'] = (t1 and t2 and t3)
        if return_invalid:
            return (all(ok.values()), tuple((k for (k, v) in ok.items() if (not v))))
        else:
            return all(ok.values())

    def update_parameters(self, allow_new=True, **kwargs):
        """Set configuration attributes from ``kwargs``.

        Parameters
        ----------
        allow_new : bool
            If ``False``, raise :class:`AttributeError` when any key in
            ``kwargs`` is not already an attribute of this config.
        """
        if (not allow_new):
            attr_new = []
            for k in kwargs:
                try:
                    getattr(self, k)
                except AttributeError:
                    attr_new.append(k)
            if (len(attr_new) > 0):
                raise AttributeError(('Not allowed to add new parameters (%s)' % ', '.join(attr_new)))
        for k in kwargs:
            setattr(self, k, kwargs[k])
class N2V(CARE):
    """The Noise2Void training scheme to train a standard CARE network for image restoration and enhancement.

    Uses a convolutional neural network created by :func:`csbdeep.internals.nets.custom_unet`.

    Parameters
    ----------
    config : :class:`n2v.models.N2VConfig` or None
        Valid configuration of N2V network (see :func:`N2VConfig.is_valid`).
        Will be saved to disk as JSON (``config.json``).
        If set to ``None``, will be loaded from disk (must exist).
    name : str or None
        Model name. Uses a timestamp if set to ``None`` (default).
    basedir : str
        Directory that contains (or will contain) a folder with the given model name.
        Use ``None`` to disable saving (or loading) any data to (or from) disk (regardless of other parameters).

    Raises
    ------
    FileNotFoundError
        If ``config=None`` and config cannot be loaded from disk.
    ValueError
        Illegal arguments, including invalid configuration.

    Example
    -------
    >>> model = N2V(config, 'my_model')

    Attributes
    ----------
    config : :class:`n2v.models.N2VConfig`
        Configuration of N2V trainable CARE network, as provided during instantiation.
    keras_model : `Keras model <https://keras.io/getting-started/functional-api-guide/>`_
        Keras neural network model.
    name : str
        Model name.
    logdir : :class:`pathlib.Path`
        Path to model folder (which stores configuration, weights, etc.)
    """

    def __init__(self, config, name=None, basedir='.'):
        """See class docstring."""
        # Argument validation via the `cond or _raise(...)` idiom.
        ((config is None) or isinstance(config, self._config_class) or _raise(ValueError(("Invalid configuration of type '%s', was expecting type '%s'." % (type(config).__name__, self._config_class.__name__)))))
        if ((config is not None) and (not config.is_valid())):
            invalid_attr = config.is_valid(True)[1]
            raise ValueError(('Invalid configuration attributes: ' + ', '.join(invalid_attr)))
        ((not ((config is None) and (basedir is None))) or _raise(ValueError('No config provided and cannot be loaded from disk since basedir=None.')))
        ((name is None) or (isinstance(name, string_types) and (len(name) > 0)) or _raise(ValueError(("No valid name: '%s'" % str(name)))))
        ((basedir is None) or isinstance(basedir, (string_types, Path)) or _raise(ValueError(("No valid basedir: '%s'" % str(basedir)))))
        self.config = config
        # Default model name: a timestamp.
        self.name = (name if (name is not None) else datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S.%f'))
        self.basedir = (Path(basedir) if (basedir is not None) else None)
        if (config is not None):
            self._update_and_check_config()
        # _set_logdir saves the config (or loads it when config is None).
        self._set_logdir()
        if (config is None):
            self._update_and_check_config()
        self._model_prepared = False
        self.keras_model = self._build()
        # When loading an existing model, also restore its weights.
        if (config is None):
            self._find_and_load_weights()

    def _build(self):
        # Build the Keras model entirely from configuration values.
        return self._build_unet(n_dim=self.config.n_dim, residual=self.config.unet_residual, n_depth=self.config.unet_n_depth, kern_size=self.config.unet_kern_size, n_first=self.config.unet_n_first, last_activation=self.config.unet_last_activation, batch_norm=self.config.batch_norm, blurpool=self.config.blurpool, skip_skipone=self.config.skip_skipone)(self.config.unet_input_shape, self.config.single_net_per_channel)

    def _build_unet(self, n_dim=2, n_depth=2, kern_size=3, n_first=32, residual=True, last_activation='linear', batch_norm=True, blurpool=False, skip_skipone=False):
        """Construct a common CARE neural net based on U-Net [1]_ and residual learning [2]_ to be used for image restoration/enhancement.

        Parameters
        ----------
        n_dim : int
            number of image dimensions (2 or 3)
        n_depth : int
            number of resolution levels of U-Net architecture
        kern_size : int
            size of convolution filter in all image dimensions
        n_first : int
            number of convolution filters for first U-Net resolution level (value is doubled after each downsampling operation)
        residual : bool
            if True, model will internally predict the residual w.r.t. the input (typically better);
            requires number of input and output image channels to be equal
        last_activation : str
            name of activation function for the final output layer
        batch_norm : bool
            Use batch normalization during training
        blurpool : bool
            Use MaxBlurPool instead of plain max-pooling
        skip_skipone : bool
            Drop the top-most skip connection

        Returns
        -------
        function
            Function to construct the network, which takes as arguments the
            shape of the input image and the single-net-per-channel flag.

        References
        ----------
        .. [1] Olaf Ronneberger, Philipp Fischer, Thomas Brox, *U-Net: Convolutional Networks for Biomedical Image Segmentation*, MICCAI 2015
        .. [2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. *Deep Residual Learning for Image Recognition*, CVPR 2016
        """
        def _build_this(input_shape, single_net_per_channel):
            if single_net_per_channel:
                return build_single_unet_per_channel(input_shape, last_activation, n_depth, n_first, ((kern_size,) * n_dim), pool_size=((2,) * n_dim), residual=residual, prob_out=False, batch_norm=batch_norm, blurpool=blurpool, skip_skipone=skip_skipone)
            else:
                return build_unet(input_shape, last_activation, n_depth, n_first, ((kern_size,) * n_dim), pool_size=((2,) * n_dim), residual=residual, prob_out=False, batch_norm=batch_norm, blurpool=blurpool, skip_skipone=skip_skipone)
        return _build_this

    def train(self, X, validation_X, epochs=None, steps_per_epoch=None):
        """Train the neural network with the given data.

        Parameters
        ----------
        X : :class:`numpy.ndarray`
            Array of source images.
        validation_X : :class:`numpy.ndarray`
            Array of validation images.
        epochs : int
            Optional argument to use instead of the value from ``config``.
        steps_per_epoch : int
            Optional argument to use instead of the value from ``config``.

        Returns
        -------
        ``History`` object
            See `Keras training history <https://keras.io/models/model/#fit>`_.
        """
        (n_train, n_val) = (len(X), len(validation_X))
        frac_val = ((1.0 * n_val) / (n_train + n_val))
        frac_warn = 0.05
        if (frac_val < frac_warn):
            warnings.warn(('small number of validation images (only %.1f%% of all images)' % (100 * frac_val)))
        axes = axes_check_and_normalize(('S' + self.config.axes), X.ndim)
        ax = axes_dict(axes)
        div_by = (2 ** self.config.unet_n_depth)
        axes_relevant = ''.join((a for a in 'XYZT' if (a in axes)))
        val_num_pix = 1
        train_num_pix = 1
        val_patch_shape = ()
        # Every spatial/temporal axis of the training data must be divisible
        # by 2**depth (otherwise the U-Net up/downsampling cannot round-trip).
        for a in axes_relevant:
            n = X.shape[ax[a]]
            val_num_pix *= validation_X.shape[ax[a]]
            train_num_pix *= X.shape[ax[a]]
            val_patch_shape += tuple([validation_X.shape[ax[a]]])
            if ((n % div_by) != 0):
                raise ValueError(('training images must be evenly divisible by %d along axes %s (axis %s has incompatible size %d)' % (div_by, axes_relevant, a, n)))
        if (epochs is None):
            epochs = self.config.train_epochs
        if (steps_per_epoch is None):
            steps_per_epoch = self.config.train_steps_per_epoch
        if (not self._model_prepared):
            self.prepare_for_training()
        # Instantiate the configured pixel manipulator, e.g. pm_uniform_withCP(5).
        manipulator = eval('pm_{0}({1})'.format(self.config.n2v_manipulator, str(self.config.n2v_neighborhood_radius)))
        means = np.array([float(mean) for mean in self.config.means], ndmin=len(X.shape), dtype=np.float32)
        stds = np.array([float(std) for std in self.config.stds], ndmin=len(X.shape), dtype=np.float32)
        X = self.__normalize__(X, means, stds)
        validation_X = self.__normalize__(validation_X, means, stds)
        _mask = (np.array(self.config.structN2Vmask) if self.config.structN2Vmask else None)
        # Targets are the inputs themselves plus an (initially zero) mask
        # channel filled in by the data wrapper's pixel manipulation.
        training_data = N2V_DataWrapper(X, np.concatenate((X, np.zeros(X.shape, dtype=X.dtype)), axis=axes.index('C')), batch_size=self.config.train_batch_size, length=(self.config.train_steps_per_epoch * self.config.train_epochs), perc_pix=self.config.n2v_perc_pix, shape=self.config.n2v_patch_shape, value_manipulation=manipulator, structN2Vmask=_mask)
        validation_Y = np.concatenate((validation_X, np.zeros(validation_X.shape, dtype=validation_X.dtype)), axis=axes.index('C'))
        # NOTE: manipulates validation_X/validation_Y in place.
        n2v_utils.manipulate_val_data(validation_X, validation_Y, perc_pix=self.config.n2v_perc_pix, shape=val_patch_shape, value_manipulation=manipulator)
        self.callbacks.append(CARETensorBoardImage(model=self.keras_model, data=(validation_X, validation_X), log_dir=str(((self.logdir / 'logs') / 'images')), n_images=3, prob_out=False))
        history = self.keras_model.fit(iter(training_data), validation_data=(validation_X, validation_Y), epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=self.callbacks, verbose=1)
        if (self.basedir is not None):
            self.keras_model.save_weights(str((self.logdir / 'weights_last.h5')))
            if (self.config.train_checkpoint is not None):
                print()
                self._find_and_load_weights(self.config.train_checkpoint)
                # Remove the per-epoch temporary weights file, if any.
                try:
                    (self.logdir / 'weights_now.h5').unlink()
                except FileNotFoundError:
                    pass
        return history

    def prepare_for_training(self, optimizer=None, **kwargs):
        """Prepare for neural network training.

        Calls :func:`prepare_model` and creates
        `Keras Callbacks <https://keras.io/callbacks/>`_ to be used for training.

        Note that this method will be implicitly called once by :func:`train`
        (with default arguments) if not done so explicitly beforehand.

        Parameters
        ----------
        optimizer : obj or None
            Instance of a `Keras Optimizer <https://keras.io/optimizers/>`_ to be used for training.
            If ``None`` (default), uses ``Adam`` with the learning rate specified in ``config``.
        kwargs : dict
            Additional arguments for :func:`prepare_model`.
        """
        if (optimizer is None):
            from tensorflow.keras.optimizers import Adam
            optimizer = Adam(learning_rate=self.config.train_learning_rate)
        self.callbacks = self.prepare_model(self.keras_model, optimizer, self.config.train_loss, **kwargs)
        # Disk-dependent callbacks only when a basedir was given.
        if (self.basedir is not None):
            if (self.config.train_checkpoint is not None):
                from tensorflow.keras.callbacks import ModelCheckpoint
                self.callbacks.append(ModelCheckpoint(str((self.logdir / self.config.train_checkpoint)), save_best_only=True, save_weights_only=True))
                self.callbacks.append(ModelCheckpoint(str((self.logdir / 'weights_now.h5')), save_best_only=False, save_weights_only=True))
            if self.config.train_tensorboard:
                from tensorflow.keras.callbacks import TensorBoard
                self.callbacks.append(TensorBoard(log_dir=str((self.logdir / 'logs')), write_graph=False, profile_batch=0))
        if (self.config.train_reduce_lr is not None):
            from tensorflow.keras.callbacks import ReduceLROnPlateau
            rlrop_params = self.config.train_reduce_lr
            if ('verbose' not in rlrop_params):
                rlrop_params['verbose'] = True
            self.callbacks.append(ReduceLROnPlateau(**rlrop_params))
        self._model_prepared = True

    def prepare_model(self, model, optimizer, loss, metrics=('mse', 'mae')):
        """Compile ``model`` with the masked N2V loss and metrics; return the
        initial callback list (currently only ``TerminateOnNaN``)."""
        from tensorflow.keras.optimizers import Optimizer
        (isinstance(optimizer, Optimizer) or _raise(ValueError()))
        # Resolve the loss/metric factories by name (loss_mse / loss_mae).
        if (loss == 'mse'):
            loss_standard = eval('loss_mse()')
        elif (loss == 'mae'):
            loss_standard = eval('loss_mae()')
        _metrics = [eval(('loss_%s()' % m)) for m in metrics]
        callbacks = [TerminateOnNaN()]
        model.compile(optimizer=optimizer, loss=loss_standard, metrics=_metrics)
        return callbacks

    def __normalize__(self, data, means, stds):
        # Standard per-channel z-score normalization.
        return ((data - means) / stds)

    def __denormalize__(self, data, means, stds):
        # Inverse of __normalize__.
        return ((data * stds) + means)

    def predict(self, img, axes, resizer=PadAndCropResizer(), n_tiles=None, tta=False):
        """
        Apply the network to sofar unseen data. This method expects the raw data, i.e. not normalized.
        During prediction the mean and standard deviation, stored with the model (during data generation), are used
        for normalization.

        Parameters
        ----------
        img : array(floats)
            The raw images.
        axes : String
            Axes of the image ('YX').
        resizer : class(Resizer), optional(default=PadAndCropResizer())
        n_tiles : tuple(int)
            Number of tiles to tile the image into, if it is too large for memory.
        tta : bool
            Use test-time augmentation during prediction.

        Returns
        -------
        image : array(float)
            The restored image.
        """
        means = np.array([float(mean) for mean in self.config.means], ndmin=len(img.shape), dtype=np.float32)
        stds = np.array([float(std) for std in self.config.stds], ndmin=len(img.shape), dtype=np.float32)
        if (img.dtype != np.float32):
            print('The input image is of type {} and will be casted to float32 for prediction.'.format(img.dtype))
            img = img.astype(np.float32)
        new_axes = axes
        new_n_tiles = n_tiles
        if ('C' in axes):
            # Move channel to the last position (and reorder n_tiles to match).
            new_axes = (axes.replace('C', '') + 'C')
            if n_tiles:
                new_n_tiles = (tuple([n_tiles[axes.index(c)] for c in axes if (c != 'C')]) + (n_tiles[axes.index('C')],))
            normalized = self.__normalize__(np.moveaxis(img, axes.index('C'), (- 1)), means, stds)
        else:
            # Add a singleton channel for normalization, then drop it again.
            normalized = self.__normalize__(img[(..., np.newaxis)], means, stds)
            normalized = normalized[(..., 0)]
        if tta:
            # Test-time augmentation: predict each augmented view and merge.
            aug = tta_forward(normalized)
            preds = []
            for img in aug:
                preds.append(self._predict_mean_and_scale(img, axes=new_axes, normalizer=None, resizer=resizer, n_tiles=new_n_tiles)[0])
            pred = tta_backward(preds)
        else:
            pred = self._predict_mean_and_scale(normalized, axes=new_axes, normalizer=None, resizer=resizer, n_tiles=new_n_tiles)[0]
        pred = self.__denormalize__(pred, means, stds)
        if ('C' in axes):
            pred = np.moveaxis(pred, (- 1), axes.index('C'))
        return pred

    def predict_bioimageio(self, img: np.ndarray, axes: str, eps: float=1e-06):
        """Predict using the normalization scheme of the exported bioimage.io
        model (float64 arithmetic, ``eps``-stabilized std) so that exported
        test input/output pairs are reproducible."""
        means = np.array([float(mean) for mean in self.config.means], ndmin=len(img.shape))
        stds = np.array([float(std) for std in self.config.stds], ndmin=len(img.shape))
        img = img.astype(np.float64)
        # bioimage.io uses lowercase 'b' for the batch/sample axis.
        if ('b' in axes):
            axes = axes.replace('b', 'S').upper()
        new_axes = axes
        if ('C' in axes):
            new_axes = (axes.replace('C', '') + 'C')
            normalized = np.moveaxis(img, axes.index('C'), (- 1))
            normalized = ((normalized - means) / (stds + eps))
        else:
            normalized = img[(..., np.newaxis)]
            normalized = ((normalized - means) / (stds + eps))
            normalized = normalized[(..., 0)]
        pred = self._predict_mean_and_scale(normalized, axes=new_axes, normalizer=None, resizer=None)[0]
        pred = pred.astype(np.float64)
        pred = self.__denormalize__(pred, means, stds)
        if ('C' in axes):
            pred = np.moveaxis(pred, (- 1), axes.index('C'))
        return pred

    def _set_logdir(self):
        # Resolve the model folder and either load the config from its
        # config.json (when none was given) or save the given config there.
        self.logdir = (self.basedir / self.name)
        config_file = (self.logdir / 'config.json')
        if (self.config is None):
            if config_file.exists():
                config_dict = load_json(str(config_file))
                # Empty X: the config is restored entirely from kwargs.
                self.config = self._config_class(np.array([]), **config_dict)
                if (not self.config.is_valid()):
                    invalid_attr = self.config.is_valid(True)[1]
                    raise ValueError(('Invalid attributes in loaded config: ' + ', '.join(invalid_attr)))
            else:
                raise FileNotFoundError(("config file doesn't exist: %s" % str(config_file.resolve())))
        else:
            if self.logdir.exists():
                warnings.warn(('output path for model already exists, files may be overwritten: %s' % str(self.logdir.resolve())))
            self.logdir.mkdir(parents=True, exist_ok=True)
            save_json(vars(self.config), str(config_file))

    @suppress_without_basedir(warn=True)
    def export_TF(self, name: str, description: str, authors: List[str], test_img: np.ndarray, axes: str, patch_shape: Tuple[(int, int)], license: str='BSD-3-Clause', result_path: Union[(Path, str)]=None):
        """
        Export the trained model in bioimage.io model-zoo format.

        name: String
            Name of the model.
        description: String
            A short description of the model e.g. on what data it was trained.
        authors: List
            Comma seperated list of author names.
        test_img: np.ndarray
            Example input image used to generate the test input/output pair.
        axes: String
            Axes of ``test_img``.
        patch_shape: Tuple
            The shape of the patches used in model.train().
        license: String
            Model license, default is BSD-3-Clause
        result_path: String
            Path to the result folder, optional
        """
        input_n_dims = len(test_img.shape)
        if ('C' in axes):
            input_n_dims -= 1
        assert (input_n_dims == self.config.n_dim), 'Input and network dimensions do not match.'
        assert (test_img.shape[axes.index('X')] == test_img.shape[axes.index('Y')]), 'X and Y dimensions are not of same length.'
        if (patch_shape != None):
            self.config.patch_shape = patch_shape
        if (result_path is None):
            result_path = self.logdir
        result_path = Path(result_path).absolute()
        # Generate the reference output with the export-time normalization.
        test_output = self.predict_bioimageio(test_img, axes)
        model_path = (result_path / 'tf_model.zip')
        config_path = (result_path / 'config.json')
        save_model_tf(model=self.keras_model, config=self.config, config_path=config_path, model_path=model_path)
        # bioimage.io axes are lowercase with 'b' as the batch axis; add a
        # batch axis to the test data if there was none.
        new_axes = axes.replace('S', 'b').lower()
        if ('b' not in new_axes):
            new_axes = ('b' + new_axes)
            axes = ('S' + axes)
            test_img = test_img[(np.newaxis, ...)]
            test_output = test_output[(np.newaxis, ...)]
        input_file = (self.logdir.absolute() / 'test_input.npy')
        np.save(input_file, test_img.astype(np.float64))
        output_file = (self.logdir.absolute() / 'test_output.npy')
        np.save(output_file, test_output.astype(np.float64))
        # Normalization is baked into the spec as pre/postprocessing steps.
        preprocessing = [{'name': 'zero_mean_unit_variance', 'kwargs': {'mode': 'fixed', 'axes': ('yx' if (len(axes) == 4) else 'zyx'), 'mean': [float(m) for m in self.config.means], 'std': [float(s) for s in self.config.stds]}}]
        postprocessing = [{'name': 'scale_linear', 'kwargs': {'axes': ('yx' if (len(axes) == 4) else 'zyx'), 'gain': [float(s) for s in self.config.stds], 'offset': [float(m) for m in self.config.means]}}]
        authors = [{'name': author} for author in authors]
        algorithm = which_algorithm(self.config)
        cite = get_algorithm_details(algorithm)
        doc = generate_bioimage_md(name, cite, result_path)
        files = [str(config_path.absolute()), str(model_path.absolute())]
        result_archive_path = (result_path / (result_path.stem + Extensions.BIOIMAGE_EXT.value))
        build_modelzoo(result_archive_path, model_path, result_path, input_file, output_file, preprocessing, postprocessing, doc, name, authors, algorithm, tf.__version__, cite, new_axes, files)
        print(('\nModel exported in BioImage ModelZoo format:\n%s' % str(result_archive_path.resolve())))

    @property
    def _config_class(self):
        # Configuration class used by the CARE base machinery.
        return N2VConfig
class MaxBlurPool2D(Layer):
    """
    MaxBlurPool proposed in:
    Zhang, Richard. "Making convolutional networks shift-invariant again."
    International conference on machine learning. PMLR, 2019.

    Implementation inspired by: https://github.com/csvance/blur-pool-keras

    Dense (stride-1) max-pooling followed by a strided depthwise blur with a
    fixed 3x3 binomial kernel; the blur's stride performs the downsampling.

    Parameters
    ----------
    pool : tuple(int, int)
        Pooling/downsampling factor per spatial dimension, e.g. ``(2, 2)``.
    """

    def __init__(self, pool, **kwargs):
        self.pool = pool
        self.blur_kernel = None  # created lazily in build()
        super(MaxBlurPool2D, self).__init__(**kwargs)

    def build(self, input_shape):
        # Fixed (non-trainable) 3x3 binomial blur kernel, replicated for each
        # input channel to form a depthwise filter of shape (3, 3, C, 1).
        gaussian = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])
        gaussian = (gaussian / np.sum(gaussian))
        gaussian = np.repeat(gaussian, input_shape[3])
        gaussian = np.reshape(gaussian, (3, 3, input_shape[3], 1))
        blur_init = keras.initializers.constant(gaussian)
        self.blur_kernel = self.add_weight(name='blur_kernel', shape=(3, 3, input_shape[3], 1), initializer=blur_init, trainable=False)
        super(MaxBlurPool2D, self).build(input_shape)

    def call(self, x, **kwargs):
        # Dense max-pool first (stride 1), then blur and subsample in a single
        # strided depthwise convolution.
        x = tf.nn.pool(x, (self.pool[0], self.pool[1]), strides=(1, 1), padding='SAME', pooling_type='MAX', data_format='NHWC')
        x = K.depthwise_conv2d(x, self.blur_kernel, padding='same', strides=(self.pool[0], self.pool[1]))
        return x

    def compute_output_shape(self, input_shape):
        # Fix: derive the spatial output size from the configured pool factors
        # instead of a hard-coded 2, so pool sizes other than (2, 2) are
        # reported correctly ('same' padding with stride p yields ceil(d / p)).
        return (input_shape[0], int(np.ceil(input_shape[1] / self.pool[0])), int(np.ceil(input_shape[2] / self.pool[1])), input_shape[3])

    def get_config(self):
        # Include 'pool' so the layer can be re-created from its config.
        config = super().get_config()
        config.update({'pool': self.pool})
        return config
def unet_block(n_depth=2, n_filter_base=16, kernel_size=(3, 3), n_conv_per_depth=2, activation='relu', batch_norm=False, dropout=0.0, last_activation=None, pool=(2, 2), kernel_init='glorot_uniform', prefix='', blurpool=False, skip_skipone=False):
    """Return a function that appends a U-Net body to a Keras tensor.

    Parameters
    ----------
    n_depth : int
        Number of down/upsampling resolution levels.
    n_filter_base : int
        Filters at the first level (doubled at each deeper level).
    kernel_size : tuple(int)
        Convolution kernel size; its length (2 or 3) sets the dimensionality.
    n_conv_per_depth : int
        Convolutions per resolution level.
    activation, last_activation : str
        Activations for inner layers and for the final convolution
        (``last_activation`` defaults to ``activation``).
    batch_norm : bool
        Use batch normalization in the conv blocks.
    dropout : float
        Dropout rate passed to the conv blocks.
    pool : tuple(int)
        Pooling factor per spatial dimension; must match ``kernel_size`` length.
    kernel_init : str
        Kernel initializer name.
    prefix : str
        Prefix for all generated layer names.
    blurpool : bool
        Use :class:`MaxBlurPool2D` instead of plain max-pooling (2D only).
    skip_skipone : bool
        Drop the top-most (level-0) skip connection.

    Returns
    -------
    function
        Takes an input tensor and returns the U-Net output tensor.

    Raises
    ------
    ValueError
        If ``pool``/``kernel_size`` lengths differ or dimensionality is not 2/3.
    NotImplementedError
        If ``blurpool`` is requested for 3D.
    """
    if (len(pool) != len(kernel_size)):
        raise ValueError('kernel and pool sizes must match.')
    n_dim = len(kernel_size)
    if (n_dim not in (2, 3)):
        raise ValueError('unet_block only 2d or 3d.')
    # Pick 2D/3D building blocks up front.
    conv_block = (conv_block2 if (n_dim == 2) else conv_block3)
    if blurpool:
        if (n_dim == 2):
            pooling = MaxBlurPool2D
        else:
            raise NotImplementedError
    else:
        pooling = (MaxPooling2D if (n_dim == 2) else MaxPooling3D)
    upsampling = (UpSampling2D if (n_dim == 2) else UpSampling3D)
    if (last_activation is None):
        last_activation = activation
    channel_axis = ((- 1) if backend_channels_last() else 1)

    def _name(s):
        return (prefix + s)

    def _func(input):
        skip_layers = []
        layer = input
        # --- contracting path ---
        for n in range(n_depth):
            for i in range(n_conv_per_depth):
                layer = conv_block((n_filter_base * (2 ** n)), *kernel_size, dropout=dropout, activation=activation, init=kernel_init, batch_norm=batch_norm, name=_name(('down_level_%s_no_%s' % (n, i))))(layer)
            # With skip_skipone, the level-0 skip connection is omitted.
            if skip_skipone:
                if (n > 0):
                    skip_layers.append(layer)
            else:
                skip_layers.append(layer)
            layer = pooling(pool, name=_name(('max_%s' % n)))(layer)
        # --- bottleneck ---
        for i in range((n_conv_per_depth - 1)):
            layer = conv_block((n_filter_base * (2 ** n_depth)), *kernel_size, dropout=dropout, init=kernel_init, activation=activation, batch_norm=batch_norm, name=_name(('middle_%s' % i)))(layer)
        layer = conv_block((n_filter_base * (2 ** max(0, (n_depth - 1)))), *kernel_size, dropout=dropout, activation=activation, init=kernel_init, batch_norm=batch_norm, name=_name(('middle_%s' % n_conv_per_depth)))(layer)
        # --- expanding path ---
        for n in reversed(range(n_depth)):
            if skip_skipone:
                # Level 0 has no stored skip tensor; plain upsampling there.
                if (n > 0):
                    layer = Concatenate(axis=channel_axis)([upsampling(pool)(layer), skip_layers[(n - 1)]])
                else:
                    layer = upsampling(pool)(layer)
            else:
                layer = Concatenate(axis=channel_axis)([upsampling(pool)(layer), skip_layers[n]])
            for i in range((n_conv_per_depth - 1)):
                if (skip_skipone and (n > 0)):
                    n_filter = (n_filter_base * (2 ** n))
                else:
                    n_filter = n_filter_base
                layer = conv_block(n_filter, *kernel_size, dropout=dropout, init=kernel_init, activation=activation, batch_norm=batch_norm, name=_name(('up_level_%s_no_%s' % (n, i))))(layer)
            # Final conv of the level; top level uses last_activation.
            layer = conv_block((n_filter_base * (2 ** max(0, (n - 1)))), *kernel_size, dropout=dropout, init=kernel_init, activation=(activation if (n > 0) else last_activation), batch_norm=batch_norm, name=_name(('up_level_%s_no_%s' % (n, n_conv_per_depth))))(layer)
        return layer

    return _func
def PSNR(gt, img, range):
    """
    Compute Peak Signal-to-Noise Ratio.

    Parameters:
        gt: np.array
            The ground truth target image.
        img: np.array
            The image of interest.
        range: float
            Intensity range e.g. gt.max() - gt.min() used for the PSNR
            computation.
    """
    # NOTE: parameter name shadows the builtin `range`; kept because callers
    # pass it by keyword (range=...).
    mse = np.square(gt - img).mean()
    return 10 * (2 * np.log10(range) - np.log10(mse))
def best_PSNR(gt, img, range):
    """
    Compute best Peak Signal-to-Noise Ratio by normalizing img such that
    MSE is minimized to the gt image.

    Parameters:
        gt: np.array
            The ground truth target image.
        img: np.array
            The image of interest.
        range: float
            Intensity range e.g. gt.max() - gt.min() used for the PSNR
            computation.
    """
    # Affine-rescale img to minimize its MSE against gt before scoring.
    return PSNR(gt, normalize_minmse(img, gt), range=range)
class Extensions(Enum):
    """File extensions used by the model save/export helpers."""
    BIOIMAGE_EXT = '.bioimage.io.zip'  # bioimage.io model archive (see export_TF)
    KERAS_EXT = '.h5'
    TF_EXT = '.zip'
class Format(Enum):
    """Model serialization formats (Keras HDF5 vs. TensorFlow SavedModel)."""
    H5 = 'h5'
    TF = 'tf'
class Algorithm(Enum):
    """Enumeration of the supported denoising algorithm variants."""
    N2V = 0
    StructN2V = 1
    N2V2 = 2

    @staticmethod
    def get_name(algorithm: int) -> str:
        """Return the human-readable name for an algorithm code.

        Any code other than 1 (structN2V) or 2 (N2V2) maps to the default
        'Noise2Void'.
        """
        display_names = {1: 'structN2V', 2: 'N2V2'}
        return display_names.get(algorithm, 'Noise2Void')
class PixelManipulator(Enum):
    """Names of the N2V blind-spot pixel manipulators; the values mirror the
    strings accepted by ``N2VConfig.n2v_manipulator``."""
    UNIFORM_WITH_CP = 'uniform_withCP'
    UNIFORM_WITHOUT_CP = 'uniform_withoutCP'
    NORMAL_WITHOUT_CP = 'normal_withoutCP'
    NORMAL_ADDITIVE = 'normal_additive'
    NORMAL_FITTED = 'normal_fitted'
    IDENTITY = 'identity'
    MEAN = 'mean'
    MEDIAN = 'median'
def which_algorithm(config: N2VConfig):
    """
    Infer which algorithm the model is configured for (N2V, N2V2 or
    structN2V) from an N2VConfig instance.
    """
    # A structN2V mask takes precedence over everything else.
    if config.structN2Vmask is not None:
        return Algorithm.StructN2V

    # N2V2 requires the median manipulator, no residual U-Net, blur-pooling
    # and skipping the first skip connection.
    looks_like_n2v2 = (
        config.n2v_manipulator == PixelManipulator.MEDIAN.value
        and not config.unet_residual
        and config.blurpool
        and config.skip_skipone
    )
    return Algorithm.N2V2 if looks_like_n2v2 else Algorithm.N2V
def generate_bioimage_md(name: str, cite: list, path: Path):
    """
    Generate a generic document.md file for the bioimage.io format.

    `cite` is expected to hold at least one dict with a 'text' key (as
    produced by get_algorithm_details); only the first entry is used.
    Returns the absolute path of the written 'napari-n2v.md' file.
    """
    file = (path / 'napari-n2v.md')
    with open(file, 'w') as f:
        text = cite[0]['text']
        content = f'''## {name}

This network was trained using [napari-n2v](https://pypi.org/project/napari-n2v/).

## Cite {name}

{text}'''
        f.write(content)
    return file.absolute()
def get_algorithm_details(algorithm: Algorithm):
    """
    Return the citation entry for `algorithm`, formatted as expected by the
    bioimage.io model builder (a list with one {'text', 'doi'} dict).
    """
    if algorithm == Algorithm.StructN2V:
        return [{'text': 'C. Broaddus, A. Krull, M. Weigert, U. Schmidt and G. Myers, "Removing Structured Noise with Self-Supervised Blind-Spot Networks," 2020 IEEE 17th International Symposium on Biomedical Imaging (ISBI), 2020, pp. 159-163',
                 'doi': '10.1109/ISBI45749.2020.9098336'}]
    if algorithm == Algorithm.N2V2:
        return [{'text': 'E. Hoeck, T.-O. Buchholz, A. Brachmann, F. Jug and A. Freytag, "N2V2--Fixing Noise2Void Checkerboard Artifacts with Modified Sampling Strategies and a Tweaked Network Architecture." arXiv preprint arXiv:2211.08512 (2022).',
                 'doi': '10.48550/arXiv.2211.08512'}]
    # Default: plain Noise2Void.
    return [{'text': 'A. Krull, T.-O. Buchholz and F. Jug, "Noise2Void - Learning Denoising From Single Noisy Images," 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 2124-2132',
             'doi': '10.48550/arXiv.1811.10980'}]
def build_modelzoo(result_path: Union[(str, Path)], weights_path: Union[(str, Path)], bundle_path: Union[(str, Path)], inputs: str, outputs: str, preprocessing: list, postprocessing: list, doc: Union[(str, Path)], name: str, authors: list, algorithm: Algorithm, tf_version: str, cite: List[Dict], axes: str='byxc', files: Union[(list, None)]=None, **kwargs):
    """
    Export the model as a bioimage.io package via bioimageio.core's
    build_model.

    Parameters mirror build_model: paths to the weights/bundle/output,
    test input/output file names, pre/post-processing specs, documentation
    file, model name/authors, the Algorithm variant (used for the tags),
    the TensorFlow version string, citations, axes string and extra
    attachment files. Remaining kwargs are forwarded to build_model.
    """
    # Local import: bioimageio is an optional, heavyweight dependency.
    from bioimageio.core.build_spec import build_model

    # Fix: `files` previously defaulted to a shared mutable list ([]).
    if files is None:
        files = []

    # A 5-character axes string (e.g. 'bzyxc') indicates volumetric data.
    tags_dim = '3d' if len(axes) == 5 else '2d'

    build_model(root=bundle_path,
                weight_uri=weights_path,
                test_inputs=[inputs],
                test_outputs=[outputs],
                input_axes=[axes],
                output_axes=[axes],
                output_path=result_path,
                name=name,
                description='Self-supervised denoising.',
                authors=authors,
                license='BSD-3-Clause',
                documentation=doc,
                tags=[tags_dim, 'unet', 'denoising', Algorithm.get_name(algorithm.value), 'tensorflow', 'napari'],
                preprocessing=[preprocessing],
                postprocessing=[postprocessing],
                tensorflow_version=tf_version,
                attachments={'files': files},
                cite=cite,
                **kwargs)
def save_model_tf(model, config, model_path, config_path):
    """
    Save `model` in TensorFlow SavedModel format next to `model_path`,
    write `config` as JSON to `config_path`, then zip the SavedModel
    folder into an archive at `model_path`.

    Returns the absolute path of the created archive.
    """
    saved_model_dir = model_path.parent / model_path.stem
    tf.keras.models.save_model(model, saved_model_dir, save_format=Format.TF.value, include_optimizer=False)
    save_json(vars(config), config_path)

    archive_path = model_path.absolute()
    with ZipFile(archive_path, mode='w') as zf:
        # Store every file relative to the SavedModel folder root.
        for entry in saved_model_dir.rglob('*'):
            zf.write(entry, arcname=entry.relative_to(saved_model_dir))
    return archive_path
def get_subpatch(patch, coord, local_sub_patch_radius, crop_patch=True):
    """
    Extract the (2r+1)^ndims window centred on `coord` from `patch`.

    If crop_patch is True, the window is clipped at the patch borders and
    (crop_neg, crop_pos) record how far it stuck out on each side (<=0 and
    >=0 per axis). If False, the window is shifted back inside the patch
    so it always has full size, and both crops are 0.

    Returns (sub_patch, crop_neg, crop_pos).
    """
    radius = local_sub_patch_radius
    crop_neg = crop_pos = 0
    if crop_patch:
        lo = np.array(coord) - radius
        hi = lo + 2 * radius + 1
        # Amount the window overhangs the patch on the low/high side.
        crop_neg = np.minimum(lo, 0)
        crop_pos = np.maximum(0, hi - patch.shape)
        lo = lo - crop_neg
        hi = hi - crop_pos
    else:
        lo = np.maximum(0, np.array(coord) - radius)
        hi = lo + 2 * radius + 1
        # Shift the full-size window back inside the patch if needed.
        shift = np.minimum(0, patch.shape - hi)
        lo = lo + shift
        hi = hi + shift
    window = tuple(slice(a, b) for (a, b) in zip(lo, hi))
    return (patch[window], crop_neg, crop_pos)
def random_neighbor(shape, coord):
    """
    Sample a coordinate near `coord` (normal-distributed offsets, clipped
    to `shape`) that is guaranteed to differ from `coord` itself.

    Fix: the original rejection test `np.any(rand_coords == coord)`
    compared a list against a tuple, which is plain Python `==` and always
    False — the centre coordinate was never actually rejected. Compare as
    tuples instead so an exact match triggers a resample.
    NOTE(review): this rejects only the *identical* coordinate; if the
    intent was to reject any shared component, the condition would need
    element-wise any() — confirm against upstream N2V.
    """
    rand_coords = sample_coords(shape, coord)
    while tuple(rand_coords) == tuple(coord):
        rand_coords = sample_coords(shape, coord)
    return rand_coords
def sample_coords(shape, coord, sigma=4):
    """Draw one coordinate per axis from a normal centred on `coord`,
    rounded and clipped to the valid index range of `shape`."""
    return [normal_int(mean, sigma, extent) for (mean, extent) in zip(coord, shape)]
def normal_int(mean, sigma, w):
    """Draw from N(mean, sigma), round to the nearest integer and clip the
    result into [0, w - 1]."""
    draw = np.random.normal(mean, sigma)
    return int(np.clip(np.round(draw), 0, w - 1))
def mask_center(local_sub_patch_radius, ndims=2):
    """
    Boolean mask over a (2r+1)^ndims window that is True everywhere except
    at the centre element.

    Generalized: works for any ndims >= 1 by indexing the centre with
    (r,) * ndims (previously only ndims 2 and 3 were supported and other
    values raised NotImplementedError).
    """
    size = (local_sub_patch_radius * 2) + 1
    patch_wo_center = np.ones((size,) * ndims)
    patch_wo_center[(local_sub_patch_radius,) * ndims] = 0
    # make_mask converts to bool and drops the zero at the centre.
    return ma.make_mask(patch_wo_center)
def pm_normal_withoutCP(local_sub_patch_radius):
    """Build a manipulator that replaces each masked pixel by a randomly
    drawn neighbouring pixel (normal-distributed offset, never the pixel
    itself)."""
    def normal_withoutCP(patch, coords, dims, structN2Vmask=None):
        return [patch[tuple(random_neighbor(patch.shape, c))] for c in zip(*coords)]
    return normal_withoutCP
def pm_mean(local_sub_patch_radius):
    """Build a manipulator that replaces each masked pixel by the mean of
    its local window (centre pixel excluded)."""
    def patch_mean(patch, coords, dims, structN2Vmask=None):
        patch_wo_center = mask_center(local_sub_patch_radius, ndims=dims)
        # Fix: the previous `(structN2Vmask or patch_wo_center)` raises
        # ValueError when structN2Vmask is a numpy array (ambiguous truth
        # value); use an explicit None check instead.
        # NOTE(review): assumes structN2Vmask, when given, matches the
        # local-window shape — confirm with callers.
        mask = patch_wo_center if structN2Vmask is None else structN2Vmask
        vals = []
        for coord in zip(*coords):
            (sub_patch, crop_neg, crop_pos) = get_subpatch(patch, coord, local_sub_patch_radius)
            # Trim the mask to match the (possibly border-cropped) window.
            slices = [slice((- n), (s - p)) for (n, p, s) in zip(crop_neg, crop_pos, patch_wo_center.shape)]
            sub_patch_mask = mask[tuple(slices)]
            vals.append(np.mean(sub_patch[sub_patch_mask]))
        return vals
    return patch_mean
def pm_median(local_sub_patch_radius):
    """Build a manipulator that replaces each masked pixel by the median of
    its local window (centre pixel excluded) — the N2V2 manipulator."""
    def patch_median(patch, coords, dims, structN2Vmask=None):
        patch_wo_center = mask_center(local_sub_patch_radius, ndims=dims)
        # Fix: the previous `(structN2Vmask or patch_wo_center)` raises
        # ValueError when structN2Vmask is a numpy array (ambiguous truth
        # value); use an explicit None check instead.
        # NOTE(review): assumes structN2Vmask, when given, matches the
        # local-window shape — confirm with callers.
        mask = patch_wo_center if structN2Vmask is None else structN2Vmask
        vals = []
        for coord in zip(*coords):
            (sub_patch, crop_neg, crop_pos) = get_subpatch(patch, coord, local_sub_patch_radius)
            # Trim the mask to match the (possibly border-cropped) window.
            slices = [slice((- n), (s - p)) for (n, p, s) in zip(crop_neg, crop_pos, patch_wo_center.shape)]
            sub_patch_mask = mask[tuple(slices)]
            vals.append(np.median(sub_patch[sub_patch_mask]))
        return vals
    return patch_median
def pm_uniform_withCP(local_sub_patch_radius):
    """Build a manipulator that replaces each masked pixel by a uniformly
    chosen pixel from its local window (the centre pixel may be picked)."""
    def random_neighbor_withCP_uniform(patch, coords, dims, structN2Vmask=None):
        vals = []
        for coord in zip(*coords):
            (window, _, _) = get_subpatch(patch, coord, local_sub_patch_radius)
            # Uniform index within the (possibly border-cropped) window.
            pick = tuple(np.random.randint(0, extent) for extent in window.shape[0:dims])
            vals.append(window[pick])
        return vals
    return random_neighbor_withCP_uniform
def pm_uniform_withoutCP(local_sub_patch_radius):
    """Build a manipulator that replaces each masked pixel by a uniformly
    chosen pixel from its local window, excluding the centre pixel."""
    def random_neighbor_withoutCP_uniform(patch, coords, dims, structN2Vmask=None):
        patch_wo_center = mask_center(local_sub_patch_radius, ndims=dims)
        # Fix: the previous `(structN2Vmask or patch_wo_center)` raises
        # ValueError when structN2Vmask is a numpy array (ambiguous truth
        # value); use an explicit None check instead.
        # NOTE(review): assumes structN2Vmask, when given, matches the
        # local-window shape — confirm with callers.
        mask = patch_wo_center if structN2Vmask is None else structN2Vmask
        vals = []
        for coord in zip(*coords):
            (sub_patch, crop_neg, crop_pos) = get_subpatch(patch, coord, local_sub_patch_radius)
            # Trim the mask to match the (possibly border-cropped) window.
            slices = [slice((- n), (s - p)) for (n, p, s) in zip(crop_neg, crop_pos, patch_wo_center.shape)]
            sub_patch_mask = mask[tuple(slices)]
            # Uniform draw from the masked neighbourhood.
            vals.append(np.random.permutation(sub_patch[sub_patch_mask])[0])
        return vals
    return random_neighbor_withoutCP_uniform
def pm_normal_additive(pixel_gauss_sigma):
    """Build a manipulator that replaces each masked pixel by a draw from
    N(pixel_value, pixel_gauss_sigma)."""
    def pixel_gauss(patch, coords, dims, structN2Vmask=None):
        return [np.random.normal(patch[tuple(c)], pixel_gauss_sigma) for c in zip(*coords)]
    return pixel_gauss
def pm_normal_fitted(local_sub_patch_radius):
    """Build a manipulator that replaces each masked pixel by a draw from a
    Gaussian fitted (mean/std) to its local window."""
    def local_gaussian(patch, coords, dims, structN2Vmask=None):
        reduce_axes = tuple(range(dims))
        vals = []
        for coord in zip(*coords):
            (window, _, _) = get_subpatch(patch, coord, local_sub_patch_radius)
            mu = np.mean(window, axis=reduce_axes)
            sd = np.std(window, axis=reduce_axes)
            vals.append(np.random.normal(mu, sd))
        return vals
    return local_gaussian
def pm_identity(local_sub_patch_radius):
    """Build a no-op manipulator: each masked pixel keeps its own value
    (the radius argument is accepted for interface uniformity)."""
    def identity(patch, coords, dims, structN2Vmask=None):
        return [patch[c] for c in zip(*coords)]
    return identity
def manipulate_val_data(X_val, Y_val, perc_pix=0.198, shape=(64, 64), value_manipulation=pm_uniform_withCP(5)):
    """
    Prepare N2V validation data in place: for a stratified subset of
    pixels, copy the original value into Y_val, mark the pixel in Y_val's
    mask channels, and overwrite the pixel in X_val with the manipulated
    value.

    X_val is indexed as (sample, ..., channel); Y_val must have twice as
    many channels as X_val (value channels followed by mask channels).
    NOTE: the default `value_manipulation` is evaluated once at import
    time, which is fine here since the closure is stateless.
    """
    dims = len(shape)
    # NOTE(review): only dims 2 and 3 are handled; other values leave
    # box_size/get_stratified_coords unbound and fail below with NameError.
    if (dims == 2):
        box_size = np.round(np.sqrt((100 / perc_pix))).astype(np.int32)
        get_stratified_coords = dw.__get_stratified_coords2D__
        rand_float = dw.__rand_float_coords2D__(box_size)
    elif (dims == 3):
        # NOTE(review): the 3-D branch also derives box_size with sqrt —
        # confirm this is intended rather than a cube-root-based size.
        box_size = np.round(np.sqrt((100 / perc_pix))).astype(np.int32)
        get_stratified_coords = dw.__get_stratified_coords3D__
        rand_float = dw.__rand_float_coords3D__(box_size)
    n_chan = X_val.shape[(- 1)]
    # Reset targets in place; only masked positions are filled below.
    Y_val *= 0
    for j in tqdm(range(X_val.shape[0]), desc='Preparing validation data: '):
        # One stratified coordinate set per sample, over the spatial axes.
        coords = get_stratified_coords(rand_float, box_size=box_size, shape=np.array(X_val.shape)[1:(- 1)])
        for c in range(n_chan):
            # Value channel c and its companion mask channel c + n_chan.
            indexing = (((j,) + coords) + (c,))
            indexing_mask = (((j,) + coords) + ((c + n_chan),))
            y_val = X_val[indexing]
            x_val = value_manipulation(X_val[(j, ..., c)], coords, dims)
            # Order matters: read the original values before overwriting X_val.
            Y_val[indexing] = y_val
            Y_val[indexing_mask] = 1
            X_val[indexing] = x_val
def autocorrelation(x):
    """
    n-D autocorrelation via the Wiener-Khinchin theorem.

    The patch is standardised (its own mean removed, std scaled to 1),
    the zero-shift value is normalised to 1, and the result is fftshifted
    so zero shift sits at the centre of the array.
    """
    standardized = (x - np.mean(x)) / np.std(x)
    spectrum = np.fft.fftn(standardized)
    power = np.abs(spectrum) ** 2
    ac = np.fft.ifftn(power).real
    ac = ac / ac.flat[0]
    return np.fft.fftshift(ac)
def tta_forward(x):
    """
    Augments x 8-fold: all 90 deg rotations plus lr flip of the four
    rotated versions.

    Parameters
    ----------
    x: data to augment

    Returns
    -------
    List of the 8 augmented versions (the four rotations first, then
    their left-right flips, in the same order).
    """
    # Keep x itself (not a rot90(x, 0) view) as the first element.
    rotations = [x] + [np.rot90(x, k) for k in (1, 2, 3)]
    return rotations + [np.fliplr(r) for r in rotations]