# NOTE(review): the next tokens ("code", "stringlengths", "17", "6.64M") are a
# dataset-export column header, not Python source — they have been commented
# out so the file parses; remove them entirely when cleaning up.
def countless_generalized(data, factor):
    """Downsample a labeled array by taking the mode of each block.

    ``factor`` gives the per-axis downsampling factor and must match the
    rank of ``data``.  Zero is used internally as a "no agreement"
    sentinel, so the array is shifted by +1 before processing and shifted
    back afterwards (the input is temporarily mutated in place).
    """
    assert len(data.shape) == len(factor)

    mode_of = reduce(lambda a, b: a * b, factor)
    majority = int(math.ceil(float(mode_of) / 2))

    data += 1  # reserve 0 as the sentinel value

    # One strided view per offset within the downsampling block.
    sections = [
        data[tuple(np.s_[off::f] for off, f in zip(offset, factor))]
        for offset in np.ndindex(factor)
    ]

    def pick(elements):
        # elements[0] where every entry agrees, otherwise 0.
        pairwise_eq = (elements[i] == elements[i + 1] for i in range(len(elements) - 1))
        return elements[0] * reduce(lambda p, q: p & q, pairwise_eq)

    def logical_or(x, y):
        # x where nonzero, else y.
        return x + (x == 0) * y

    # Start from majority-size agreements, then fall through smaller sizes.
    result = reduce(logical_or, (pick(combo) for combo in combinations(sections, majority)))

    for size in range(majority - 1, 2, -1):
        partial = reduce(logical_or, (pick(combo) for combo in combinations(sections, size)))
        result = logical_or(result, partial)

    # Pairs exclude the last section so it can serve as the final fallback.
    partial = reduce(logical_or, (pick(combo) for combo in combinations(sections[:-1], 2)))
    result = logical_or(result, partial)

    result = logical_or(result, sections[-1]) - 1
    data -= 1  # undo the in-place shift
    return result
def dynamic_countless_generalized(data, factor):
    """Mode-pooling downsample that memoizes sub-combination results.

    Behaves like ``countless_generalized`` but reuses each (r-1)-element
    agreement when computing r-element agreements, trading memory for
    speed.  Temporarily shifts ``data`` in place so 0 can act as the
    "no agreement" sentinel.
    """
    assert len(data.shape) == len(factor)

    mode_of = reduce(lambda a, b: a * b, factor)
    majority = int(math.ceil(float(mode_of) / 2))

    data += 1  # 0 becomes the sentinel

    sections = [
        data[tuple(np.s_[off::f] for off, f in zip(offset, factor))]
        for offset in np.ndindex(factor)
    ]

    pick = lambda a, b: a * (a == b)
    lor = lambda x, y: x + (x == 0) * y

    # Level 2: pairwise agreements among all sections except the last,
    # cached for reuse by level 3.
    subproblems = [{}, {}]
    pair_result = None
    for x, y in combinations(range(len(sections) - 1), 2):
        agreed = pick(sections[x], sections[y])
        subproblems[0][(x, y)] = agreed
        pair_result = agreed if pair_result is None else lor(pair_result, agreed)

    results = [pair_result]
    for r in range(3, majority + 1):
        level_result = None
        for combo in combinations(range(len(sections)), r):
            res = pick(subproblems[0][combo[:-1]], sections[combo[-1]])
            # Combos ending in the last section are never extended, so skip caching.
            if combo[-1] != len(sections) - 1:
                subproblems[1][combo] = res
            level_result = res if level_result is None else lor(level_result, res)
        results.append(level_result)
        subproblems[0], subproblems[1] = subproblems[1], {}

    # Prefer larger agreements; the last section is the final fallback.
    results.reverse()
    final_result = lor(reduce(lor, results), sections[-1]) - 1
    data -= 1  # undo the in-place shift
    return final_result
def downsample_with_averaging(array):
    """Downsample ``array`` by a fixed (2, 2, 2) factor using averaging.

    Edge blocks that hang past the array boundary are averaged over only
    the elements that actually exist (tracked by ``counts``).

    @return: The downsampled array, of the same type as ``array``.
    """
    factor = (2, 2, 2)
    if np.array_equal(factor[:3], np.array([1, 1, 1])):
        return array
    output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor))
    temp = np.zeros(output_shape, float)
    # BUG FIX: np.int was removed in NumPy 1.24 and np.cast in NumPy 2.0;
    # use the builtin int dtype and ndarray.astype instead.
    counts = np.zeros(output_shape, int)
    for offset in np.ndindex(factor):
        part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
        indexing_expr = tuple(np.s_[:s] for s in part.shape)
        temp[indexing_expr] += part
        counts[indexing_expr] += 1
    return (temp / counts).astype(array.dtype)
def downsample_with_max_pooling(array):
    """Downsample ``array`` by a fixed (2, 2, 2) factor using max pooling."""
    factor = (2, 2, 2)
    sections = [
        array[tuple(np.s_[o::f] for o, f in zip(offset, factor))]
        for offset in np.ndindex(factor)
    ]
    # Accumulate the elementwise maximum in place over all block offsets.
    output = sections[0].copy()
    for section in sections[1:]:
        np.maximum(output, section, output)
    return output
def striding(array):
    """Downsample ``array`` by a fixed (2, 2, 2) factor using striding.

    @return: The downsampled array, of the same type as ``array``.
    """
    factor = (2, 2, 2)
    if np.all(np.array(factor, int) == 1):
        return array  # nothing to do for a unit factor
    return array[tuple(np.s_[::f] for f in factor)]
def benchmark():
    """Time the generalized countless implementations on a synthetic volume."""

    def countless3d_generalized(img):
        return countless_generalized(img, (2, 8, 1))

    def countless3d_dynamic_generalized(img):
        return dynamic_countless_generalized(img, (8, 8, 1))

    methods = [countless3d_generalized]

    # 256^3 uint8 volume of ones as the benchmark payload.
    data = np.zeros(shape=(16 ** 2, 16 ** 2, 16 ** 2), dtype=np.uint8) + 1
    N = 5

    print('Algorithm\tMPx\tMB/sec\tSec\tN=%d' % N)
    for fn in methods:
        start = time.time()
        for _ in range(N):
            result = fn(data)
        end = time.time()

        total_time = end - start
        voxels = float(data.shape[0] * data.shape[1] * data.shape[2])
        mpx = N * voxels / total_time / 1024.0 / 1024.0
        mbytes = mpx * np.dtype(data.dtype).itemsize
        print('%s\t%.3f\t%.3f\t%.2f' % (fn.__name__, mpx, mbytes, total_time))
def test_countless2d():
    """Exercise every 2x2 mode-pooling variant against hand-built cases."""

    def test_all_cases(fn, test_zero):
        shape = (2, 2, 1, 1)
        case1 = np.array([[1, 2], [3, 4]]).reshape(shape)
        case2 = np.array([[1, 1], [2, 3]]).reshape(shape)
        case1z = np.array([[0, 1], [2, 3]]).reshape(shape)
        case2z = np.array([[0, 0], [2, 3]]).reshape(shape)
        case3 = np.array([[1, 1], [2, 2]]).reshape(shape)
        case4 = np.array([[1, 2], [2, 2]]).reshape(shape)
        case5 = np.array([[5, 5], [5, 5]]).reshape(shape)
        is_255_handled = np.array([[255, 255], [1, 2]], dtype=np.uint8).reshape(shape)

        if test_zero:
            # Zero-aware variants must treat label 0 as a real value.
            assert fn(case1z) == [[[[3]]]]
            assert fn(case2z) == [[[[0]]]]
        else:
            assert fn(case1) == [[[[4]]]]
            assert fn(case2) == [[[[1]]]]

        assert fn(case3) == [[[[1]]]]
        assert fn(case4) == [[[[2]]]]
        assert fn(case5) == [[[[5]]]]
        assert fn(is_255_handled) == [[[[255]]]]
        assert fn(case1).dtype == case1.dtype

    test_all_cases(countless2d.simplest_countless, False)
    test_all_cases(countless2d.quick_countless, False)
    test_all_cases(countless2d.quickest_countless, False)
    test_all_cases(countless2d.stippled_countless, False)

    # Variants that handle the zero label correctly.
    methods = [
        countless2d.zero_corrected_countless,
        countless2d.countless,
        countless2d.countless_if,
    ]
    for fn in methods:
        print(fn.__name__)
        test_all_cases(fn, True)
def test_stippled_countless2d():
    """stippled_countless treats zeros as missing pixels that never win."""
    shape = (2, 2, 1, 1)
    make = lambda rows: np.array(rows).reshape(shape)
    test = countless2d.stippled_countless

    # (input 2x2 grid, expected mode) — every zero-pattern of the four cells.
    cases = [
        ([[1, 2], [3, 4]], 4),
        ([[0, 2], [3, 4]], 4),
        ([[1, 0], [3, 4]], 4),
        ([[1, 2], [0, 4]], 4),
        ([[1, 2], [3, 0]], 1),
        ([[0, 0], [3, 4]], 4),
        ([[0, 2], [0, 4]], 4),
        ([[0, 2], [3, 0]], 2),
        ([[1, 0], [0, 4]], 4),
        ([[1, 2], [0, 0]], 1),
        ([[1, 0], [3, 0]], 1),
        ([[1, 0], [0, 0]], 1),
        ([[0, 2], [0, 0]], 2),
        ([[0, 0], [3, 0]], 3),
        ([[0, 0], [0, 4]], 4),
        ([[0, 0], [0, 0]], 0),
    ]
    for rows, expected in cases:
        assert test(make(rows)) == [[[[expected]]]]

    # A zero plus a matching pair: the pair's value wins.
    pair_cases = [
        ([[0, 2], [2, 4]], 2),
        ([[0, 2], [3, 2]], 2),
        ([[0, 2], [3, 3]], 3),
        ([[1, 1], [0, 4]], 1),
        ([[1, 2], [1, 0]], 1),
        ([[1, 0], [3, 1]], 1),
    ]
    for rows, expected in pair_cases:
        assert test(make(rows)) == [[[[expected]]]]
def test_countless3d():
    """All 3D countless variants agree on hand-built 2x2x2 mode cases."""

    def test_all_cases(fn):
        alldifferent = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
        allsame = [[[1, 1], [1, 1]], [[1, 1], [1, 1]]]

        assert fn(np.array(alldifferent)) == [[[8]]]
        assert fn(np.array(allsame)) == [[[1]]]

        twosame = deepcopy(alldifferent)
        twosame[1][1][0] = 2
        assert fn(np.array(twosame)) == [[[2]]]

        threemixed = [[[3, 3], [1, 2]], [[2, 4], [4, 3]]]
        assert fn(np.array(threemixed)) == [[[3]]]

        foursame = [[[4, 4], [1, 2]], [[2, 4], [4, 3]]]
        assert fn(np.array(foursame)) == [[[4]]]

        fivesame = [[[5, 4], [5, 5]], [[2, 4], [5, 5]]]
        assert fn(np.array(fivesame)) == [[[5]]]

    def countless3d_generalized(img):
        return countless3d.countless_generalized(img, (2, 2, 2))

    def countless3d_dynamic_generalized(img):
        return countless3d.dynamic_countless_generalized(img, (2, 2, 2))

    methods = [
        countless3d.countless3d,
        countless3d.dynamic_countless3d,
        countless3d_generalized,
        countless3d_dynamic_generalized,
    ]
    for fn in methods:
        test_all_cases(fn)
def load_yaml(path):
    """Read the YAML file at ``path`` into an ``edict`` for attribute access."""
    with open(path, 'r') as stream:
        config = yaml.safe_load(stream)
    return edict(config)
def move_to_device(obj, device):
    """Recursively move modules/tensors inside ``obj`` onto ``device``.

    Supports nn.Module, torch.Tensor, tuples/lists (both returned as
    lists) and dicts; raises ValueError for any other type.
    """
    if isinstance(obj, nn.Module):
        return obj.to(device)
    if torch.is_tensor(obj):
        return obj.to(device)
    if isinstance(obj, (tuple, list)):
        return [move_to_device(element, device) for element in obj]
    if isinstance(obj, dict):
        return {key: move_to_device(value, device) for key, value in obj.items()}
    raise ValueError(f'Unexpected type {type(obj)}')
class SmallMode(Enum):
    """How to treat images smaller than the requested output size."""

    # Skip the sample entirely.
    DROP = 'drop'
    # Resize the sample up to the target size.
    UPSCALE = 'upscale'
def save_item_for_vis(item, out_file):
    """Render ``item``'s image with mask boundaries (plus inpainting, if present) to ``out_file``."""
    mask = item['mask'] > 0.5
    if mask.ndim == 3:
        mask = mask[0]  # drop the channel axis

    img = mark_boundaries(
        np.transpose(item['image'], (1, 2, 0)),  # CHW -> HWC
        mask,
        color=(1.0, 0.0, 0.0),
        outline_color=(1.0, 1.0, 1.0),
        mode='thick',
    )

    if 'inpainted' in item:
        inp_img = mark_boundaries(
            np.transpose(item['inpainted'], (1, 2, 0)),
            mask,
            color=(1.0, 0.0, 0.0),
            mode='outer',
        )
        # Show original and inpainted side by side.
        img = np.concatenate((img, inp_img), axis=1)

    img = np.clip(img * 255, 0, 255).astype('uint8')
    io.imsave(out_file, img)
def save_mask_for_sidebyside(item, out_file):
    """Write ``item``'s mask to ``out_file`` as an 8-bit grayscale image."""
    mask = item['mask']
    if mask.ndim == 3:
        mask = mask[0]  # drop the channel axis
    mask = np.clip(mask * 255, 0, 255).astype('uint8')
    io.imsave(out_file, mask)
def save_img_for_sidebyside(item, out_file):
    """Write ``item``'s CHW float image to ``out_file`` as an 8-bit HWC image."""
    img = np.transpose(item['image'], (1, 2, 0))  # CHW -> HWC
    img = np.clip(img * 255, 0, 255).astype('uint8')
    io.imsave(out_file, img)
class IAAAffine2(DualIAATransform):
    """Affine augmentation (scale / translate / rotate / shear) via imgaug.

    Note: introduces interpolation artifacts in masks whose values are
    not strictly {0; 1}.

    Args:
        p (float): probability of applying the transform. Default: 0.5.

    Targets:
        image, mask
    """

    def __init__(self, scale=(0.7, 1.3), translate_percent=None, translate_px=None,
                 rotate=0.0, shear=(-0.1, 0.1), order=1, cval=0, mode='reflect',
                 always_apply=False, p=0.5):
        super(IAAAffine2, self).__init__(always_apply, p)
        # scale/shear are applied per-axis with the same range for x and y.
        self.scale = dict(x=scale, y=scale)
        self.translate_percent = to_tuple(translate_percent, 0)
        self.translate_px = to_tuple(translate_px, 0)
        self.rotate = to_tuple(rotate)
        self.shear = dict(x=shear, y=shear)
        self.order = order
        self.cval = cval
        self.mode = mode

    @property
    def processor(self):
        return iaa.Affine(self.scale, self.translate_percent, self.translate_px,
                          self.rotate, self.shear, self.order, self.cval, self.mode)

    def get_transform_init_args_names(self):
        return ('scale', 'translate_percent', 'translate_px', 'rotate',
                'shear', 'order', 'cval', 'mode')
class IAAPerspective2(DualIAATransform):
    """Random four-point perspective transform via imgaug.

    Note: introduces interpolation artifacts in masks whose values are
    not strictly {0; 1}.

    Args:
        scale ((float, float)): standard deviation of the normal distributions
            used to sample the corner displacements. Default: (0.05, 0.1).
        p (float): probability of applying the transform. Default: 0.5.

    Targets:
        image, mask
    """

    def __init__(self, scale=(0.05, 0.1), keep_size=True, always_apply=False, p=0.5,
                 order=1, cval=0, mode='replicate'):
        super(IAAPerspective2, self).__init__(always_apply, p)
        self.scale = to_tuple(scale, 1.0)
        self.keep_size = keep_size
        # NOTE(review): `order` is accepted but never stored or forwarded to
        # iaa.PerspectiveTransform — confirm whether it should be.
        self.cval = cval
        self.mode = mode

    @property
    def processor(self):
        return iaa.PerspectiveTransform(self.scale, keep_size=self.keep_size,
                                        mode=self.mode, cval=self.cval)

    def get_transform_init_args_names(self):
        return ('scale', 'keep_size')
class InpaintingTrainDataset(Dataset):
    """Yields dicts of (image, mask) built from jpgs found under ``indir``."""

    def __init__(self, indir, mask_generator, transform):
        # Recursive search for all training jpgs.
        self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))
        self.mask_generator = mask_generator
        self.transform = transform
        # Monotonically increasing counter fed to the mask generator.
        self.iter_i = 0

    def __len__(self):
        return len(self.in_files)

    def __getitem__(self, item):
        path = self.in_files[item]
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = self.transform(image=img)['image']
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        # NOTE(review): with num_workers > 0 each worker keeps its own copy of
        # iter_i, so values may repeat across workers — confirm intended.
        mask = self.mask_generator(img, iter_i=self.iter_i)
        self.iter_i += 1
        return dict(image=img, mask=mask)
class InpaintingTrainWebDataset(IterableDataset):
    """Streams (image, mask) dicts from a webdataset shard source."""

    def __init__(self, indir, mask_generator, transform, shuffle_buffer=200):
        self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode('rgb').to_tuple('jpg')
        self.mask_generator = mask_generator
        self.transform = transform

    def __iter__(self):
        for iter_i, (img,) in enumerate(self.impl):
            # webdataset decodes 'rgb' to float [0, 1]; the transform expects uint8.
            img = np.clip(img * 255, 0, 255).astype('uint8')
            img = self.transform(image=img)['image']
            img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
            mask = self.mask_generator(img, iter_i=iter_i)
            yield dict(image=img, mask=mask)
class ImgSegmentationDataset(Dataset):
    """Images paired with generated inpainting masks and semantic segmentation targets."""

    def __init__(self, indir, mask_generator, transform, out_size, segm_indir, semantic_seg_n_classes):
        self.indir = indir
        self.segm_indir = segm_indir
        self.mask_generator = mask_generator
        self.transform = transform
        self.out_size = out_size
        self.semantic_seg_n_classes = semantic_seg_n_classes
        self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True))

    def __len__(self):
        return len(self.in_files)

    def __getitem__(self, item):
        path = self.in_files[item]
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (self.out_size, self.out_size))
        img = self.transform(image=img)['image']
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        mask = self.mask_generator(img)
        segm, segm_classes = self.load_semantic_segm(path)
        return dict(image=img, mask=mask, segm=segm, segm_classes=segm_classes)

    def load_semantic_segm(self, img_path):
        # Segmentation maps mirror the image tree under segm_indir, as .png.
        segm_path = img_path.replace(self.indir, self.segm_indir).replace('.jpg', '.png')
        mask = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE)
        mask = cv2.resize(mask, (self.out_size, self.out_size))
        # Labels appear 1-based on disk; shift to 0-based, clamping below at 0.
        tensor = torch.from_numpy(np.clip(mask.astype(int) - 1, 0, None))
        ohe = F.one_hot(tensor.long(), num_classes=self.semantic_seg_n_classes)
        return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0)
def get_transforms(transform_variant, out_size):
    """Build the albumentations pipeline named by ``transform_variant``.

    ``out_size`` is the side length of the square crop produced by the
    spatial variants.  Raises ValueError for unknown variant names.
    """

    def color_augs():
        # Shared photometric tail used by every spatial variant.
        return [
            A.CLAHE(),
            A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2),
            A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5),
            A.ToFloat(),
        ]

    if transform_variant == 'default':
        transform = A.Compose([
            A.RandomScale(scale_limit=0.2),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
        ] + color_augs())
    elif transform_variant == 'distortions':
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.7, 1.3), rotate=(-40, 40), shear=(-0.1, 0.1)),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
        ] + color_augs())
    elif transform_variant == 'distortions_scale05_1':
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.5, 1.0), rotate=(-40, 40), shear=(-0.1, 0.1), p=1),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
        ] + color_augs())
    elif transform_variant == 'distortions_scale03_12':
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.3, 1.2), rotate=(-40, 40), shear=(-0.1, 0.1), p=1),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
        ] + color_augs())
    elif transform_variant == 'distortions_scale03_07':
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.06)),
            IAAAffine2(scale=(0.3, 0.7), rotate=(-40, 40), shear=(-0.1, 0.1), p=1),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.OpticalDistortion(),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
        ] + color_augs())
    elif transform_variant == 'distortions_light':
        # Lighter geometry; no OpticalDistortion.
        transform = A.Compose([
            IAAPerspective2(scale=(0.0, 0.02)),
            IAAAffine2(scale=(0.8, 1.8), rotate=(-20, 20), shear=(-0.03, 0.03)),
            A.PadIfNeeded(min_height=out_size, min_width=out_size),
            A.RandomCrop(height=out_size, width=out_size),
            A.HorizontalFlip(),
        ] + color_augs())
    elif transform_variant == 'non_space_transform':
        transform = A.Compose(color_augs())
    elif transform_variant == 'no_augs':
        transform = A.Compose([A.ToFloat()])
    else:
        raise ValueError(f'Unexpected transform_variant {transform_variant}')
    return transform
def make_default_train_dataloader(indir, kind='default', out_size=512, mask_gen_kwargs=None,
                                  transform_variant='default', mask_generator_kind='mixed',
                                  dataloader_kwargs=None, ddp_kwargs=None, **kwargs):
    """Construct the training DataLoader for the requested dataset ``kind``."""
    LOGGER.info(f'Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}')

    mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs)
    transform = get_transforms(transform_variant, out_size)

    if kind == 'default':
        dataset = InpaintingTrainDataset(indir=indir, mask_generator=mask_generator,
                                         transform=transform, **kwargs)
    elif kind == 'default_web':
        dataset = InpaintingTrainWebDataset(indir=indir, mask_generator=mask_generator,
                                            transform=transform, **kwargs)
    elif kind == 'img_with_segm':
        dataset = ImgSegmentationDataset(indir=indir, mask_generator=mask_generator,
                                         transform=transform, out_size=out_size, **kwargs)
    else:
        raise ValueError(f'Unknown train dataset kind {kind}')

    if dataloader_kwargs is None:
        dataloader_kwargs = {}

    is_dataset_only_iterable = kind in ('default_web',)

    if ddp_kwargs is not None and not is_dataset_only_iterable:
        # DistributedSampler handles shuffling itself.
        dataloader_kwargs['shuffle'] = False
        dataloader_kwargs['sampler'] = DistributedSampler(dataset, **ddp_kwargs)

    if is_dataset_only_iterable and 'shuffle' in dataloader_kwargs:
        # IterableDataset forbids a shuffle flag on the DataLoader.
        with open_dict(dataloader_kwargs):
            del dataloader_kwargs['shuffle']

    return DataLoader(dataset, **dataloader_kwargs)
def make_default_val_dataset(indir, kind='default', out_size=512, transform_variant='default', **kwargs):
    """Build a validation dataset (or a ConcatDataset when ``indir`` is a list)."""
    if OmegaConf.is_list(indir) or isinstance(indir, (tuple, list)):
        return ConcatDataset([
            make_default_val_dataset(idir, kind=kind, out_size=out_size,
                                     transform_variant=transform_variant, **kwargs)
            for idir in indir
        ])

    LOGGER.info(f'Make val dataloader {kind} from {indir}')
    mask_generator = get_mask_generator(kind=kwargs.get('mask_generator_kind'),
                                        kwargs=kwargs.get('mask_gen_kwargs'))

    if transform_variant is not None:
        transform = get_transforms(transform_variant, out_size)
    # NOTE(review): when transform_variant is None, `transform` stays undefined;
    # the 'img_with_segm'/'online' branches would then raise NameError — confirm
    # callers never combine those options.

    if kind == 'default':
        dataset = InpaintingEvaluationDataset(indir, **kwargs)
    elif kind == 'our_eval':
        dataset = OurInpaintingEvaluationDataset(indir, **kwargs)
    elif kind == 'img_with_segm':
        dataset = ImgSegmentationDataset(indir=indir, mask_generator=mask_generator,
                                         transform=transform, out_size=out_size, **kwargs)
    elif kind == 'online':
        dataset = InpaintingEvalOnlineDataset(indir=indir, mask_generator=mask_generator,
                                              transform=transform, out_size=out_size, **kwargs)
    else:
        raise ValueError(f'Unknown val dataset kind {kind}')
    return dataset
def make_default_val_dataloader(*args, dataloader_kwargs=None, **kwargs):
    """Wrap make_default_val_dataset in a DataLoader."""
    dataset = make_default_val_dataset(*args, **kwargs)
    if dataloader_kwargs is None:
        dataloader_kwargs = {}
    return DataLoader(dataset, **dataloader_kwargs)
def make_constant_area_crop_params(img_height, img_width, min_size=128, max_size=512,
                                   area=(256 * 256), round_to_mod=16):
    """Sample random crop parameters (y, x, h, w) with roughly constant area.

    One side is drawn uniformly in [min_size, max_size] (rounded up to a
    multiple of ``round_to_mod``); the other side is derived from ``area``.
    A coin flip decides which side is sampled first.
    """
    # Clamp the size bounds to the actual image dimensions.
    min_size = min(img_height, img_width, min_size)
    max_size = min(img_height, img_width, max_size)

    if random.random() < 0.5:
        out_height = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod))
        out_width = min(max_size, ceil_modulo(area // out_height, round_to_mod))
    else:
        out_width = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod))
        out_height = min(max_size, ceil_modulo(area // out_width, round_to_mod))

    start_y = random.randint(0, img_height - out_height)
    start_x = random.randint(0, img_width - out_width)
    return (start_y, start_x, out_height, out_width)
class BaseAdversarialLoss():
    """Interface for adversarial losses used by the trainer.

    Subclasses must implement ``generator_loss`` and ``discriminator_loss``;
    the ``pre_*_step`` hooks are optional no-ops here.
    """

    def pre_generator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                           generator: nn.Module, discriminator: nn.Module):
        """
        Prepare for generator step
        :param real_batch: Tensor, a batch of real samples
        :param fake_batch: Tensor, a batch of samples produced by generator
        :param generator:
        :param discriminator:
        :return: None
        """

    def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                               generator: nn.Module, discriminator: nn.Module):
        """
        Prepare for discriminator step
        :param real_batch: Tensor, a batch of real samples
        :param fake_batch: Tensor, a batch of samples produced by generator
        :param generator:
        :param discriminator:
        :return: None
        """

    def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                       discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
                       mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Calculate generator loss
        :param real_batch: Tensor, a batch of real samples
        :param fake_batch: Tensor, a batch of samples produced by generator
        :param discr_real_pred: Tensor, discriminator output for real_batch
        :param discr_fake_pred: Tensor, discriminator output for fake_batch
        :param mask: Tensor, actual mask, which was at input of generator when making fake_batch
        :return: total generator loss along with some values that might be interesting to log
        """
        # BUG FIX: was `raise NotImplemented()` — NotImplemented is a sentinel
        # value, not an exception, so calling it raised a confusing TypeError.
        raise NotImplementedError()

    def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                           discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
                           mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Calculate discriminator loss and call .backward() on it
        :param real_batch: Tensor, a batch of real samples
        :param fake_batch: Tensor, a batch of samples produced by generator
        :param discr_real_pred: Tensor, discriminator output for real_batch
        :param discr_fake_pred: Tensor, discriminator output for fake_batch
        :param mask: Tensor, actual mask, which was at input of generator when making fake_batch
        :return: total discriminator loss along with some values that might be interesting to log
        """
        # BUG FIX: same NotImplemented -> NotImplementedError correction.
        raise NotImplementedError()

    def interpolate_mask(self, mask, shape):
        # NOTE(review): relies on self.allow_scale_mask / self.mask_scale_mode,
        # which only subclasses define — the base class has no defaults.
        assert mask is not None
        assert self.allow_scale_mask or (shape == mask.shape[-2:])
        if (shape != mask.shape[-2:]) and self.allow_scale_mask:
            if self.mask_scale_mode == 'maxpool':
                mask = F.adaptive_max_pool2d(mask, shape)
            else:
                mask = F.interpolate(mask, size=shape, mode=self.mask_scale_mode)
        return mask
def make_r1_gp(discr_real_pred, real_batch):
    """R1 gradient penalty: mean squared gradient norm of D's real output w.r.t. its input.

    Returns 0 when gradients are disabled (e.g. under torch.no_grad()).
    Side effect: clears ``real_batch.requires_grad`` before returning.
    """
    if torch.is_grad_enabled():
        (grad_real,) = torch.autograd.grad(outputs=discr_real_pred.sum(),
                                           inputs=real_batch, create_graph=True)
        flat = grad_real.view(grad_real.shape[0], -1)
        grad_penalty = (flat.norm(2, dim=1) ** 2).mean()
    else:
        grad_penalty = 0
    real_batch.requires_grad = False
    return grad_penalty
class NonSaturatingWithR1(BaseAdversarialLoss):
    """Non-saturating GAN loss with an R1 gradient penalty on real samples."""

    def __init__(self, gp_coef=5, weight=1, mask_as_fake_target=False, allow_scale_mask=False,
                 mask_scale_mode='nearest', extra_mask_weight_for_gen=0,
                 use_unmasked_for_gen=True, use_unmasked_for_discr=True):
        self.gp_coef = gp_coef
        self.weight = weight
        # Masked-only G supervision requires whole-image D; masked-only D in
        # turn requires mask_as_fake_target.
        assert use_unmasked_for_gen or (not use_unmasked_for_discr)
        assert use_unmasked_for_discr or (not mask_as_fake_target)
        self.use_unmasked_for_gen = use_unmasked_for_gen
        self.use_unmasked_for_discr = use_unmasked_for_discr
        self.mask_as_fake_target = mask_as_fake_target
        self.allow_scale_mask = allow_scale_mask
        self.mask_scale_mode = mask_scale_mode
        self.extra_mask_weight_for_gen = extra_mask_weight_for_gen

    def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                       discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
                       mask=None) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        fake_loss = F.softplus(-discr_fake_pred)
        needs_mask = (self.mask_as_fake_target and self.extra_mask_weight_for_gen > 0) \
            or not self.use_unmasked_for_gen
        if needs_mask:
            mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:])
            if not self.use_unmasked_for_gen:
                # Only the inpainted (masked) region contributes to the loss.
                fake_loss = fake_loss * mask
            else:
                # Keep everything, but up-weight the masked region.
                fake_loss = fake_loss * (1 + mask * self.extra_mask_weight_for_gen)
        return fake_loss.mean() * self.weight, dict()

    def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                               generator: nn.Module, discriminator: nn.Module):
        # The R1 penalty needs gradients w.r.t. the real batch.
        real_batch.requires_grad = True

    def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                           discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
                           mask=None) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        real_loss = F.softplus(-discr_real_pred)
        grad_penalty = make_r1_gp(discr_real_pred, real_batch) * self.gp_coef
        fake_loss = F.softplus(discr_fake_pred)

        if (not self.use_unmasked_for_discr) or self.mask_as_fake_target:
            # Restrict the "fake" penalty to the generated (masked) region.
            mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:])
            fake_loss = fake_loss * mask
            if self.mask_as_fake_target:
                # Unmasked pixels of the fake image count as real.
                fake_loss = fake_loss + (1 - mask) * F.softplus(-discr_fake_pred)

        sum_discr_loss = real_loss + grad_penalty + fake_loss
        metrics = dict(discr_real_out=discr_real_pred.mean(),
                       discr_fake_out=discr_fake_pred.mean(),
                       discr_real_gp=grad_penalty)
        return sum_discr_loss.mean(), metrics
class BCELoss(BaseAdversarialLoss):
    """BCE-with-logits adversarial loss where the discriminator predicts the mask."""

    def __init__(self, weight):
        self.weight = weight
        self.bce_loss = nn.BCEWithLogitsLoss()

    def generator_loss(self, discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # The generator wants fakes classified as fully real (all-zero mask).
        real_mask_gt = torch.zeros(discr_fake_pred.shape).to(discr_fake_pred.device)
        fake_loss = self.bce_loss(discr_fake_pred, real_mask_gt) * self.weight
        return fake_loss, dict()

    def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
                               generator: nn.Module, discriminator: nn.Module):
        real_batch.requires_grad = True

    def discriminator_loss(self, mask: torch.Tensor, discr_real_pred: torch.Tensor,
                           discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # Real images should predict an empty mask; fakes should predict `mask`.
        real_mask_gt = torch.zeros(discr_real_pred.shape).to(discr_real_pred.device)
        sum_discr_loss = (self.bce_loss(discr_real_pred, real_mask_gt)
                          + self.bce_loss(discr_fake_pred, mask)) / 2
        metrics = dict(discr_real_out=discr_real_pred.mean(),
                       discr_fake_out=discr_fake_pred.mean(),
                       discr_real_gp=0)
        return sum_discr_loss, metrics
def make_discrim_loss(kind, **kwargs):
    """Factory for adversarial losses; ``kind`` is 'r1' or 'bce'."""
    if kind == 'r1':
        return NonSaturatingWithR1(**kwargs)
    if kind == 'bce':
        return BCELoss(**kwargs)
    raise ValueError(f'Unknown adversarial loss kind {kind}')
def masked_l2_loss(pred, target, mask, weight_known, weight_missing):
    """Pixelwise MSE with separate weights inside (missing) and outside (known) the mask."""
    per_pixel = F.mse_loss(pred, target, reduction='none')
    weights = mask * weight_missing + (1 - mask) * weight_known
    return (weights * per_pixel).mean()
def masked_l1_loss(pred, target, mask, weight_known, weight_missing):
    """Pixelwise L1 with separate weights inside (missing) and outside (known) the mask."""
    per_pixel = F.l1_loss(pred, target, reduction='none')
    weights = mask * weight_missing + (1 - mask) * weight_known
    return (weights * per_pixel).mean()
def feature_matching_loss(fake_features: List[torch.Tensor], target_features: List[torch.Tensor], mask=None):
    """Mean squared distance between feature maps, optionally down-weighting masked pixels.

    With a mask, each level re-interpolates it to the feature's spatial size
    and weights errors by (1 - mask); the result is averaged over levels.
    """
    if mask is None:
        level_losses = [F.mse_loss(fake_feat, target_feat)
                        for fake_feat, target_feat in zip(fake_features, target_features)]
        return torch.stack(level_losses).mean()

    total = 0
    n_levels = 0
    for fake_feat, target_feat in zip(fake_features, target_features):
        cur_mask = F.interpolate(mask, size=fake_feat.shape[-2:],
                                 mode='bilinear', align_corners=False)
        weighted_sq_err = (fake_feat - target_feat).pow(2) * (1 - cur_mask)
        total = total + weighted_sq_err.mean()
        n_levels += 1
    return total / n_levels
class CrossEntropy2d(nn.Module):
    """2D cross-entropy over (n, c, h, w) logits with an ignore label."""

    def __init__(self, reduction='mean', ignore_label=255, weights=None, *args, **kwargs):
        """
        weight (Tensor, optional): a manual rescaling weight given to each class.
        If given, has to be a Tensor of size "nclasses"
        """
        super(CrossEntropy2d, self).__init__()
        self.reduction = reduction
        self.ignore_label = ignore_label
        self.weights = weights
        if self.weights is not None:
            # NOTE(review): `constant_weights` is a module-level table defined
            # elsewhere; `weights` acts as a key into it — confirm schema.
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            self.weights = torch.FloatTensor(constant_weights[weights]).to(device)

    def forward(self, predict, target):
        """
        Args:
            predict:(n, c, h, w)
            target:(n, 1, h, w)
        """
        target = target.long()
        assert not target.requires_grad
        assert predict.dim() == 4, '{0}'.format(predict.size())
        assert target.dim() == 4, '{0}'.format(target.size())
        assert predict.size(0) == target.size(0), '{0} vs {1} '.format(predict.size(0), target.size(0))
        assert target.size(1) == 1, '{0}'.format(target.size(1))
        assert predict.size(2) == target.size(2), '{0} vs {1} '.format(predict.size(2), target.size(2))
        assert predict.size(3) == target.size(3), '{0} vs {1} '.format(predict.size(3), target.size(3))

        target = target.squeeze(1)
        n, c, h, w = predict.size()

        # Keep only pixels whose label is valid and not the ignore label.
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask]

        # (n, c, h, w) -> (n, h, w, c), then select valid pixels -> (num_valid, c).
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)

        return F.cross_entropy(predict, target, weight=self.weights, reduction=self.reduction)
class PerceptualLoss(nn.Module):
    """
    Perceptual loss, VGG-based
    https://arxiv.org/abs/1603.08155
    https://github.com/dxyang/StyleTransfer/blob/master/utils.py
    """

    # The relu*_1 activations compared with L1; one weight per level.
    _LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')

    def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
        super(PerceptualLoss, self).__init__()
        self.add_module('vgg', VGG19())
        self.criterion = torch.nn.L1Loss()
        self.weights = weights

    def __call__(self, x, y):
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        content_loss = 0.0
        for w, layer in zip(self.weights, self._LAYERS):
            content_loss += w * self.criterion(x_vgg[layer], y_vgg[layer])
        return content_loss
class VGG19(torch.nn.Module):
    """Frozen VGG19 feature extractor exposing every ReLU activation by name.

    Bug fix: the original wiring added torchvision feature layers 14-15 to
    ``relu3_2`` (for a second time) instead of ``relu3_3``, leaving ``relu3_3``
    an empty (identity) Sequential. The overall chain still computed the right
    features, but the ``relu3_2`` entry of the output dict was actually the
    relu3_3 activation and ``relu3_3`` duplicated it. The table below assigns
    each slice of ``vgg19().features`` to its correct stage.
    """

    # (stage name, start, stop) slice boundaries into torchvision vgg19 features.
    _STAGES = [
        ('relu1_1', 0, 2), ('relu1_2', 2, 4),
        ('relu2_1', 4, 7), ('relu2_2', 7, 9),
        ('relu3_1', 9, 12), ('relu3_2', 12, 14), ('relu3_3', 14, 16), ('relu3_4', 16, 18),
        ('relu4_1', 18, 21), ('relu4_2', 21, 23), ('relu4_3', 23, 25), ('relu4_4', 25, 27),
        ('relu5_1', 27, 30), ('relu5_2', 30, 32), ('relu5_3', 32, 34), ('relu5_4', 34, 36),
    ]

    def __init__(self):
        super(VGG19, self).__init__()
        features = models.vgg19(pretrained=True).features
        for name, start, stop in self._STAGES:
            stage = torch.nn.Sequential()
            # Keep the original numeric sub-module names (torchvision indices).
            for x in range(start, stop):
                stage.add_module(str(x), features[x])
            self.add_module(name, stage)
        # Used purely as a fixed feature extractor — never trained.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x):
        """Return a dict mapping each stage name to its activation tensor."""
        out = {}
        for name, _, _ in self._STAGES:
            x = getattr(self, name)(x)
            out[name] = x
        return out
def make_generator(config, kind, **kwargs):
    """Factory for generator networks.

    Known `kind` names map to concrete classes; any other value is treated as a
    dotted class path and instantiated via `instantiate_from_config`.

    Bug fix: the original ended with an unreachable
    ``raise ValueError(f'Unknown generator kind {kind}')`` after an
    unconditional ``return`` — removed as dead code.
    """
    logging.info(f'Make generator {kind}')
    if (kind == 'pix2pixhd_multidilated'):
        return MultiDilatedGlobalGenerator(**kwargs)
    if (kind == 'pix2pixhd_global'):
        return GlobalGenerator(**kwargs)
    if (kind == 'ffc_resnet'):
        return FFCResNetGenerator(**kwargs)
    if (kind == 'ffc_resnet_multiframe'):
        return FFCResNetGeneratorMultiframe(**kwargs)
    # Fallback: `kind` is a fully-qualified class path.
    from gi.main import instantiate_from_config
    return instantiate_from_config({'target': kind, 'params': kwargs})
def make_discriminator(kind, **kwargs):
    """Create a discriminator module by its registered kind name.

    Raises ValueError for an unrecognized kind.
    """
    logging.info(f'Make discriminator {kind}')
    if kind == 'pix2pixhd_nlayer_multidilated':
        discr_cls = MultidilatedNLayerDiscriminator
    elif kind == 'pix2pixhd_nlayer':
        discr_cls = NLayerDiscriminator
    else:
        raise ValueError(f'Unknown discriminator kind {kind}')
    return discr_cls(**kwargs)
class BaseDiscriminator(nn.Module):
    """Common interface for discriminators that expose intermediate activations."""

    @abc.abstractmethod
    def forward(self, x: torch.Tensor) -> Tuple[(torch.Tensor, List[torch.Tensor])]:
        """
        Predict scores and get intermediate activations. Useful for feature matching loss
        :return tuple (scores, list of intermediate activations)
        """
        # Bug fix: `NotImplemented` is a sentinel constant, not an exception, and
        # is not callable — the original `raise NotImplemented()` produced a
        # confusing TypeError instead of the intended error.
        raise NotImplementedError()
def get_conv_block_ctor(kind='default'):
    """Resolve a convolution constructor from its short name.

    Non-string values are assumed to already be constructors and are returned
    unchanged; unknown names raise ValueError.
    """
    if not isinstance(kind, str):
        # Caller passed a ready-made constructor.
        return kind
    if kind == 'default':
        return nn.Conv2d
    elif kind == 'depthwise':
        return DepthWiseSeperableConv
    elif kind == 'multidilated':
        return MultidilatedConv
    raise ValueError(f'Unknown convolutional block kind {kind}')
def get_norm_layer(kind='bn'):
    """Resolve a 2D normalization layer class from its short name.

    Non-string values are passed through unchanged; unknown names raise ValueError.
    """
    if not isinstance(kind, str):
        return kind
    known = {'bn': nn.BatchNorm2d, 'in': nn.InstanceNorm2d}
    if kind in known:
        return known[kind]
    raise ValueError(f'Unknown norm block kind {kind}')
def get_activation(kind='tanh'):
    """Build the output activation module; `kind=False` means no activation."""
    if kind is False:
        return nn.Identity()
    if kind == 'tanh':
        return nn.Tanh()
    if kind == 'sigmoid':
        return nn.Sigmoid()
    raise ValueError(f'Unknown activation kind {kind}')
class SimpleMultiStepGenerator(nn.Module):
    """Runs steps sequentially; each step sees the input concatenated with all
    previous step outputs, and the result stacks outputs last-step-first."""

    def __init__(self, steps: List[nn.Module]):
        super().__init__()
        self.steps = nn.ModuleList(steps)

    def forward(self, x):
        accumulated = x
        step_outputs = []
        for stage in self.steps:
            produced = stage(accumulated)
            step_outputs.append(produced)
            # Next stage receives everything produced so far along channels.
            accumulated = torch.cat((accumulated, produced), dim=1)
        step_outputs.reverse()
        return torch.cat(step_outputs, dim=1)
def deconv_factory(kind, ngf, mult, norm_layer, activation, max_features):
    """Build one 2x upsampling stage as a list of layers.

    :param kind: 'convtranspose' (learned transpose conv) or 'bilinear'
        (bilinear upsample + depthwise-separable conv)
    :raises ValueError: on unknown `kind`. (Was a bare ``Exception``; changed to
        ``ValueError`` for consistency with the other factory helpers in this
        module — backward compatible, since ValueError is an Exception.)
    """
    # Channel counts are capped at `max_features`, halving on the way up.
    in_ch = min(max_features, (ngf * mult))
    out_ch = min(max_features, int(((ngf * mult) / 2)))
    if (kind == 'convtranspose'):
        return [nn.ConvTranspose2d(in_ch, out_ch, kernel_size=3, stride=2,
                                   padding=1, output_padding=1),
                norm_layer(out_ch), activation]
    elif (kind == 'bilinear'):
        return [nn.Upsample(scale_factor=2, mode='bilinear'),
                DepthWiseSeperableConv(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
                norm_layer(out_ch), activation]
    else:
        raise ValueError(f'Invalid deconv kind: {kind}')
class DepthWiseSeperableConv(nn.Module):
    """Depthwise-separable convolution: per-channel spatial conv followed by a
    1x1 pointwise conv that mixes channels."""

    def __init__(self, in_dim, out_dim, *args, **kwargs):
        super().__init__()
        # `groups` is fixed to in_dim for the depthwise stage; drop any caller override.
        kwargs.pop('groups', None)
        self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs)
        self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))
class ResNetHead(nn.Module):
    """Encoder: 7x7 stem, `n_downsampling` stride-2 convs (channels doubling),
    then `n_blocks` residual blocks at the bottleneck width."""

    def __init__(self, input_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default', activation=nn.ReLU(True)):
        assert n_blocks >= 0
        super(ResNetHead, self).__init__()
        conv_layer = get_conv_block_ctor(conv_kind)

        layers = [nn.ReflectionPad2d(3),
                  conv_layer(input_nc, ngf, kernel_size=7, padding=0),
                  norm_layer(ngf),
                  activation]
        # Stride-2 downsampling stages.
        for stage in range(n_downsampling):
            mult = 2 ** stage
            layers += [conv_layer(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                       norm_layer(ngf * mult * 2),
                       activation]
        mult = 2 ** n_downsampling
        layers += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation,
                               norm_layer=norm_layer, conv_kind=conv_kind)
                   for _ in range(n_blocks)]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)
class ResNetTail(nn.Module):
    """Decoder: optional 1x1 in-projection, residual blocks, transpose-conv
    upsampling, and a separate output projection head."""

    def __init__(self, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),
                 up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False,
                 out_extra_layers_n=0, add_in_proj=None):
        assert n_blocks >= 0
        super(ResNetTail, self).__init__()
        mult = 2 ** n_downsampling

        body = []
        if add_in_proj is not None:
            # Project extra input channels (e.g. previous-scale features) down to ngf*mult.
            body.append(nn.Conv2d(add_in_proj, ngf * mult, kernel_size=1))
        body += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation,
                             norm_layer=norm_layer, conv_kind=conv_kind)
                 for _ in range(n_blocks)]
        for stage in range(n_downsampling):
            mult = 2 ** (n_downsampling - stage)
            body += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3,
                                        stride=2, padding=1, output_padding=1),
                     up_norm_layer(int(ngf * mult / 2)),
                     up_activation]
        self.model = nn.Sequential(*body)

        head = []
        for _ in range(out_extra_layers_n):
            head += [nn.Conv2d(ngf, ngf, kernel_size=1, padding=0),
                     up_norm_layer(ngf),
                     up_activation]
        head += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            head.append(get_activation('tanh' if add_out_act is True else add_out_act))
        self.out_proj = nn.Sequential(*head)

    def forward(self, input, return_last_act=False):
        features = self.model(input)
        out = self.out_proj(features)
        return (out, features) if return_last_act else out
class MultiscaleResNet(nn.Module):
    """Pyramid of ResNetHead encoders + ResNetTail decoders, decoded coarse-to-fine.

    Every tail except the coarsest one receives its head's features concatenated
    with the previous (coarser) tail's last activation, hence the 1x1
    in-projection sized `ngf * 2**n_downsampling + ngf`.
    """

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=2, n_blocks_head=2, n_blocks_tail=6, n_scales=3, norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0, out_cumulative=False, return_only_hr=False):
        super().__init__()
        self.heads = nn.ModuleList([ResNetHead(input_nc, ngf=ngf, n_downsampling=n_downsampling, n_blocks=n_blocks_head, norm_layer=norm_layer, padding_type=padding_type, conv_kind=conv_kind, activation=activation) for i in range(n_scales)])
        # Tail input = head features (ngf * 2**n_downsampling) + previous tail features (ngf).
        tail_in_feats = ((ngf * (2 ** n_downsampling)) + ngf)
        self.tails = nn.ModuleList([ResNetTail(output_nc, ngf=ngf, n_downsampling=n_downsampling, n_blocks=n_blocks_tail, norm_layer=norm_layer, padding_type=padding_type, conv_kind=conv_kind, activation=activation, up_norm_layer=up_norm_layer, up_activation=up_activation, add_out_act=add_out_act, out_extra_layers_n=out_extra_layers_n, add_in_proj=(None if (i == (n_scales - 1)) else tail_in_feats)) for i in range(n_scales)])
        self.out_cumulative = out_cumulative
        self.return_only_hr = return_only_hr

    @property
    def num_scales(self):
        return len(self.heads)

    def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int]=None) -> Union[(torch.Tensor, List[torch.Tensor])]:
        """
        :param ms_inputs: List of inputs of different resolutions from HR to LR
        :param smallest_scales_num: int or None, number of smallest scales to take at input
        :return: Depending on return_only_hr:
            True: Only the most HR output
            False: List of outputs of different resolutions from HR to LR
        """
        if (smallest_scales_num is None):
            assert (len(self.heads) == len(ms_inputs)), (len(self.heads), len(ms_inputs), smallest_scales_num)
            smallest_scales_num = len(self.heads)
        else:
            assert (smallest_scales_num == len(ms_inputs) <= len(self.heads)), (len(self.heads), len(ms_inputs), smallest_scales_num)
        # Use only the `smallest_scales_num` coarsest heads (inputs come HR -> LR).
        cur_heads = self.heads[(- smallest_scales_num):]
        ms_features = [cur_head(cur_inp) for (cur_head, cur_inp) in zip(cur_heads, ms_inputs)]
        all_outputs = []
        prev_tail_features = None
        # Decode from the coarsest scale up, feeding each tail the previous tail's features.
        for i in range(len(ms_features)):
            scale_i = ((- i) - 1)
            cur_tail_input = ms_features[((- i) - 1)]
            if (prev_tail_features is not None):
                if (prev_tail_features.shape != cur_tail_input.shape):
                    # Match spatial size of the finer scale before concatenation.
                    prev_tail_features = F.interpolate(prev_tail_features, size=cur_tail_input.shape[2:], mode='bilinear', align_corners=False)
                cur_tail_input = torch.cat((cur_tail_input, prev_tail_features), dim=1)
            (cur_out, cur_tail_feats) = self.tails[scale_i](cur_tail_input, return_last_act=True)
            prev_tail_features = cur_tail_feats
            all_outputs.append(cur_out)
        if self.out_cumulative:
            # Each finer output becomes a residual on top of the upsampled coarser sum.
            all_outputs_cum = [all_outputs[0]]
            for i in range(1, len(ms_features)):
                cur_out = all_outputs[i]
                cur_out_cum = (cur_out + F.interpolate(all_outputs_cum[(- 1)], size=cur_out.shape[2:], mode='bilinear', align_corners=False))
                all_outputs_cum.append(cur_out_cum)
            all_outputs = all_outputs_cum
        if self.return_only_hr:
            return all_outputs[(- 1)]
        else:
            # `all_outputs` is LR -> HR at this point; reverse to match HR -> LR input order.
            return all_outputs[::(- 1)]
class MultiscaleDiscriminatorSimple(nn.Module):
    """Applies one discriminator per input scale (inputs ordered HR to LR)."""

    def __init__(self, ms_impl):
        super().__init__()
        self.ms_impl = nn.ModuleList(ms_impl)

    @property
    def num_scales(self):
        return len(self.ms_impl)

    def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int]=None) -> List[Tuple[(torch.Tensor, List[torch.Tensor])]]:
        """
        :param ms_inputs: List of inputs of different resolutions from HR to LR
        :param smallest_scales_num: int or None, number of smallest scales to take at input
        :return: List of pairs (prediction, features) for different resolutions from HR to LR
        """
        if (smallest_scales_num is None):
            assert (len(self.ms_impl) == len(ms_inputs)), (len(self.ms_impl), len(ms_inputs), smallest_scales_num)
            # Bug fix: was `len(self.heads)` — an attribute this class does not
            # have (copied from MultiscaleResNet) — which raised AttributeError
            # whenever smallest_scales_num was omitted.
            smallest_scales_num = len(self.ms_impl)
        else:
            assert (smallest_scales_num == len(ms_inputs) <= len(self.ms_impl)), (len(self.ms_impl), len(ms_inputs), smallest_scales_num)
        return [cur_discr(cur_input) for (cur_discr, cur_input) in zip(self.ms_impl[(- smallest_scales_num):], ms_inputs)]
class SingleToMultiScaleInputMixin():
    """Adapts a multi-scale module to a single HR input by building a
    power-of-two bilinear pyramid (HR first)."""

    def forward(self, x: torch.Tensor) -> List:
        height, width = x.shape[2:]
        pyramid = [
            F.interpolate(x, size=(height // (2 ** level), width // (2 ** level)),
                          mode='bilinear', align_corners=False)
            for level in range(self.num_scales)
        ]
        return super().forward(pyramid)
class GeneratorMultiToSingleOutputMixin():
    """Keeps only the highest-resolution output of a multi-output generator."""

    def forward(self, x):
        all_scale_outputs = super().forward(x)
        return all_scale_outputs[0]
class DiscriminatorMultiToSingleOutputMixin():
    """Collapses multi-scale (score, features) pairs into the HR score plus a
    flat list of all features."""

    def forward(self, x):
        scale_results = super().forward(x)
        merged_feats = []
        for _, feat_list in scale_results:
            merged_feats.extend(feat_list)
        return (scale_results[0][0], merged_feats)
class DiscriminatorMultiToSingleOutputStackedMixin():
    """Stacks per-scale score maps channel-wise (bilinearly resized to the HR
    map's size) and flattens features, optionally keeping only the feature
    lists of the levels in `return_feats_only_levels`."""

    def __init__(self, *args, return_feats_only_levels=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.return_feats_only_levels = return_feats_only_levels

    def forward(self, x):
        scale_results = super().forward(x)
        score_maps = [score for (score, _) in scale_results]
        target_size = score_maps[0].shape[(- 2):]
        resized = [score_maps[0]]
        for coarser in score_maps[1:]:
            resized.append(F.interpolate(coarser, size=target_size, mode='bilinear', align_corners=False))
        stacked_scores = torch.cat(resized, dim=1)
        if self.return_feats_only_levels is not None:
            feat_lists = [scale_results[level][1] for level in self.return_feats_only_levels]
        else:
            feat_lists = [feats for (_, feats) in scale_results]
        flat_feats = [f for feats in feat_lists for f in feats]
        return (stacked_scores, flat_feats)
class MultiscaleDiscrSingleInput(SingleToMultiScaleInputMixin, DiscriminatorMultiToSingleOutputStackedMixin, MultiscaleDiscriminatorSimple):
    # Multi-scale discriminator taking a single HR tensor: the input mixin builds
    # the pyramid, the output mixin stacks the per-scale results (composition via MRO).
    pass
class MultiscaleResNetSingle(GeneratorMultiToSingleOutputMixin, SingleToMultiScaleInputMixin, MultiscaleResNet):
    # Multi-scale generator taking a single HR tensor and returning only the HR
    # output (input pyramid + output selection mixins over MultiscaleResNet).
    pass
class DotDict(defaultdict):
    'dot.notation access to dictionary attributes'
    # Attribute access maps straight onto dict operations. Reads go through
    # defaultdict.get, so a missing attribute returns None instead of raising.
    __getattr__ = defaultdict.get
    __setattr__ = defaultdict.__setitem__
    __delattr__ = defaultdict.__delitem__
class Identity(nn.Module):
    """No-op module: returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
class ResnetBlock(nn.Module):
    """Residual block with two 3x3 convs; supports dilation, grouped convs and an
    optional 1x1 input projection when the incoming channel count differs."""

    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default', dilation=1, in_dim=None, groups=1, second_dilation=None):
        """
        :param dim: channel width inside the block (also the output width)
        :param padding_type: 'reflect' | 'replicate' | 'zero'
        :param in_dim: input channels if different from `dim` (adds a 1x1 projection)
        :param second_dilation: dilation of the second conv; defaults to `dilation`
        """
        super(ResnetBlock, self).__init__()
        self.in_dim = in_dim
        self.dim = dim
        if (second_dilation is None):
            second_dilation = dilation
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout, conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups, second_dilation=second_dilation)
        if (self.in_dim is not None):
            self.input_conv = nn.Conv2d(in_dim, dim, 1)
        # NOTE(review): keeps the original triple-n spelling ('out_channnels')
        # because external code may already read this attribute.
        self.out_channnels = dim

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default', dilation=1, in_dim=None, groups=1, second_dilation=1):
        """Assemble conv -> norm -> act [-> dropout] -> conv -> norm, with the
        padding realized either as an explicit pad layer or via conv padding."""
        conv_layer = get_conv_block_ctor(conv_kind)
        conv_block = []
        p = 0
        # First conv: pad by `dilation` so the 3x3 dilated conv keeps spatial size.
        if (padding_type == 'reflect'):
            conv_block += [nn.ReflectionPad2d(dilation)]
        elif (padding_type == 'replicate'):
            conv_block += [nn.ReplicationPad2d(dilation)]
        elif (padding_type == 'zero'):
            p = dilation
        else:
            raise NotImplementedError(('padding [%s] is not implemented' % padding_type))
        if (in_dim is None):
            in_dim = dim
        conv_block += [conv_layer(in_dim, dim, kernel_size=3, padding=p, dilation=dilation), norm_layer(dim), activation]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        p = 0
        # Second conv: same scheme with `second_dilation` (and optional groups).
        if (padding_type == 'reflect'):
            conv_block += [nn.ReflectionPad2d(second_dilation)]
        elif (padding_type == 'replicate'):
            conv_block += [nn.ReplicationPad2d(second_dilation)]
        elif (padding_type == 'zero'):
            p = second_dilation
        else:
            raise NotImplementedError(('padding [%s] is not implemented' % padding_type))
        conv_block += [conv_layer(dim, dim, kernel_size=3, padding=p, dilation=second_dilation, groups=groups), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        x_before = x
        if (self.in_dim is not None):
            # Project the skip path so the residual sum has matching channels;
            # the conv branch still consumes the unprojected input.
            x = self.input_conv(x)
        out = (x + self.conv_block(x_before))
        return out
class ResnetBlock5x5(nn.Module):
    """Same structure as ResnetBlock but with 5x5 convs (padding = 2 * dilation)."""

    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default', dilation=1, in_dim=None, groups=1, second_dilation=None):
        """See ResnetBlock; `second_dilation` defaults to `dilation`."""
        super(ResnetBlock5x5, self).__init__()
        self.in_dim = in_dim
        self.dim = dim
        if (second_dilation is None):
            second_dilation = dilation
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout, conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups, second_dilation=second_dilation)
        if (self.in_dim is not None):
            self.input_conv = nn.Conv2d(in_dim, dim, 1)
        # NOTE(review): triple-n spelling kept for compatibility (see ResnetBlock).
        self.out_channnels = dim

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default', dilation=1, in_dim=None, groups=1, second_dilation=1):
        """conv5x5 -> norm -> act [-> dropout] -> conv5x5 -> norm; a 5x5 kernel
        needs padding 2*dilation to preserve spatial size."""
        conv_layer = get_conv_block_ctor(conv_kind)
        conv_block = []
        p = 0
        if (padding_type == 'reflect'):
            conv_block += [nn.ReflectionPad2d((dilation * 2))]
        elif (padding_type == 'replicate'):
            conv_block += [nn.ReplicationPad2d((dilation * 2))]
        elif (padding_type == 'zero'):
            p = (dilation * 2)
        else:
            raise NotImplementedError(('padding [%s] is not implemented' % padding_type))
        if (in_dim is None):
            in_dim = dim
        conv_block += [conv_layer(in_dim, dim, kernel_size=5, padding=p, dilation=dilation), norm_layer(dim), activation]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        p = 0
        if (padding_type == 'reflect'):
            conv_block += [nn.ReflectionPad2d((second_dilation * 2))]
        elif (padding_type == 'replicate'):
            conv_block += [nn.ReplicationPad2d((second_dilation * 2))]
        elif (padding_type == 'zero'):
            p = (second_dilation * 2)
        else:
            raise NotImplementedError(('padding [%s] is not implemented' % padding_type))
        conv_block += [conv_layer(dim, dim, kernel_size=5, padding=p, dilation=second_dilation, groups=groups), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        x_before = x
        if (self.in_dim is not None):
            # Project only the skip path; the conv branch sees the original input.
            x = self.input_conv(x)
        out = (x + self.conv_block(x_before))
        return out
class MultidilatedResnetBlock(nn.Module):
    """Residual block built from two (multi-dilated) convolutions supplied as
    `conv_layer`, with padding handled via `padding_mode`."""

    def __init__(self, dim, padding_type, conv_layer, norm_layer, activation=nn.ReLU(True), use_dropout=False):
        super().__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, conv_layer, norm_layer, activation, use_dropout)

    def build_conv_block(self, dim, padding_type, conv_layer, norm_layer, activation, use_dropout, dilation=1):
        stack = [
            conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type),
            norm_layer(dim),
            activation,
        ]
        if use_dropout:
            stack.append(nn.Dropout(0.5))
        stack += [
            conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type),
            norm_layer(dim),
        ]
        return nn.Sequential(*stack)

    def forward(self, x):
        return x + self.conv_block(x)
class MultiDilatedGlobalGenerator(nn.Module):
    """Pix2pixHD-style global generator whose bottleneck uses multidilated
    residual blocks, optionally preceded by FFC blocks at `ffc_positions`."""

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=3, norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', deconv_kind='convtranspose', activation=nn.ReLU(True), up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True), add_out_act=True, max_features=1024, multidilation_kwargs={}, ffc_positions=None, ffc_kwargs={}):
        assert (n_blocks >= 0)
        super().__init__()
        conv_layer = get_conv_block_ctor(conv_kind)
        resnet_conv_layer = functools.partial(get_conv_block_ctor('multidilated'), **multidilation_kwargs)
        norm_layer = get_norm_layer(norm_layer)
        if (affine is not None):
            # `affine` overrides the norm layers' default affine flag.
            norm_layer = partial(norm_layer, affine=affine)
        up_norm_layer = get_norm_layer(up_norm_layer)
        if (affine is not None):
            up_norm_layer = partial(up_norm_layer, affine=affine)
        model = [nn.ReflectionPad2d(3), conv_layer(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
        identity = Identity()  # NOTE(review): unused local, kept as-is
        # Stride-2 downsampling, doubling channels (capped at max_features).
        for i in range(n_downsampling):
            mult = (2 ** i)
            model += [conv_layer(min(max_features, (ngf * mult)), min(max_features, ((ngf * mult) * 2)), kernel_size=3, stride=2, padding=1), norm_layer(min(max_features, ((ngf * mult) * 2))), activation]
        mult = (2 ** n_downsampling)
        feats_num_bottleneck = min(max_features, (ngf * mult))
        # Bottleneck: an FFC block is inserted *before* the multidilated block
        # at each index listed in `ffc_positions`.
        for i in range(n_blocks):
            if ((ffc_positions is not None) and (i in ffc_positions)):
                model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU, inline=True, **ffc_kwargs)]
            model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type, conv_layer=resnet_conv_layer, activation=activation, norm_layer=norm_layer)]
        # Upsampling back to the input resolution.
        for i in range(n_downsampling):
            mult = (2 ** (n_downsampling - i))
            model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features)
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation(('tanh' if (add_out_act is True) else add_out_act)))
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
class ConfigGlobalGenerator(nn.Module):
    """Pix2pixHD-style global generator whose bottleneck blocks are driven by
    `manual_block_spec` (a list of dict specs); an empty spec list falls back
    to `n_blocks` blocks of the constructor-level defaults.

    Bug fix: the inner helper previously *assigned* `resnet_conv_layer`,
    `resnet_conv_kind`, `resnet_block_kind` and `resnet_dilation` only inside
    the ``not use_default`` branch, which made them function-locals and raised
    ``UnboundLocalError`` on the default path (empty `manual_block_spec`).
    Binding the enclosing values as default arguments keeps them defined in
    every case while preserving per-spec overrides.
    """

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=3,
                 norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default',
                 deconv_kind='convtranspose', activation=nn.ReLU(True),
                 up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True),
                 add_out_act=True, max_features=1024, manual_block_spec=[],
                 resnet_block_kind='multidilatedresnetblock', resnet_conv_kind='multidilated',
                 resnet_dilation=1, multidilation_kwargs={}):
        assert (n_blocks >= 0)
        super().__init__()
        conv_layer = get_conv_block_ctor(conv_kind)
        resnet_conv_layer = functools.partial(get_conv_block_ctor(resnet_conv_kind), **multidilation_kwargs)
        norm_layer = get_norm_layer(norm_layer)
        if (affine is not None):
            norm_layer = partial(norm_layer, affine=affine)
        up_norm_layer = get_norm_layer(up_norm_layer)
        if (affine is not None):
            up_norm_layer = partial(up_norm_layer, affine=affine)
        model = [nn.ReflectionPad2d(3), conv_layer(input_nc, ngf, kernel_size=7, padding=0),
                 norm_layer(ngf), activation]
        # Stride-2 downsampling, doubling channels (capped at max_features).
        for i in range(n_downsampling):
            mult = (2 ** i)
            model += [conv_layer(min(max_features, (ngf * mult)), min(max_features, ((ngf * mult) * 2)),
                                 kernel_size=3, stride=2, padding=1),
                      norm_layer(min(max_features, ((ngf * mult) * 2))), activation]
        mult = (2 ** n_downsampling)
        feats_num_bottleneck = min(max_features, (ngf * mult))
        if (len(manual_block_spec) == 0):
            # Default: one spec producing n_blocks blocks with constructor settings.
            manual_block_spec = [DotDict((lambda: None), {'n_blocks': n_blocks, 'use_default': True})]

        def make_and_add_blocks(model, block_spec,
                                resnet_conv_layer=resnet_conv_layer,
                                resnet_conv_kind=resnet_conv_kind,
                                resnet_block_kind=resnet_block_kind,
                                resnet_dilation=resnet_dilation):
            # Per-spec overrides replace the defaults bound above.
            block_spec = DotDict((lambda: None), block_spec)
            if (not block_spec.use_default):
                resnet_conv_layer = functools.partial(get_conv_block_ctor(block_spec.resnet_conv_kind),
                                                      **block_spec.multidilation_kwargs)
                resnet_conv_kind = block_spec.resnet_conv_kind
                resnet_block_kind = block_spec.resnet_block_kind
                if (block_spec.resnet_dilation is not None):
                    resnet_dilation = block_spec.resnet_dilation
            for i in range(block_spec.n_blocks):
                if (resnet_block_kind == 'multidilatedresnetblock'):
                    model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type,
                                                      conv_layer=resnet_conv_layer, activation=activation,
                                                      norm_layer=norm_layer)]
                if (resnet_block_kind == 'resnetblock'):
                    model += [ResnetBlock((ngf * mult), padding_type=padding_type, activation=activation,
                                          norm_layer=norm_layer, conv_kind=resnet_conv_kind)]
                if (resnet_block_kind == 'resnetblock5x5'):
                    model += [ResnetBlock5x5((ngf * mult), padding_type=padding_type, activation=activation,
                                             norm_layer=norm_layer, conv_kind=resnet_conv_kind)]
                if (resnet_block_kind == 'resnetblockdwdil'):
                    model += [ResnetBlock((ngf * mult), padding_type=padding_type, activation=activation,
                                          norm_layer=norm_layer, conv_kind=resnet_conv_kind,
                                          dilation=resnet_dilation, second_dilation=resnet_dilation)]

        for block_spec in manual_block_spec:
            make_and_add_blocks(model, block_spec)

        # Upsampling back to the input resolution.
        for i in range(n_downsampling):
            mult = (2 ** (n_downsampling - i))
            model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features)
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation(('tanh' if (add_out_act is True) else add_out_act)))
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
def make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs):
    """Create `dilated_blocks_n` bottleneck blocks.

    'simple' builds ResnetBlocks with dilation doubling per block (2, 4, 8, ...);
    'multi' builds MultidilatedResnetBlocks; anything else raises ValueError.
    """
    created = []
    for idx in range(dilated_blocks_n):
        if dilation_block_kind == 'simple':
            created.append(ResnetBlock(**dilated_block_kwargs, dilation=(2 ** (idx + 1))))
        elif dilation_block_kind == 'multi':
            created.append(MultidilatedResnetBlock(**dilated_block_kwargs))
        else:
            raise ValueError(f'dilation_block_kind could not be "{dilation_block_kind}"')
    return created
class GlobalGenerator(nn.Module):
    """Pix2pixHD global generator with optional dilated / FFC / depthwise
    bottleneck blocks inserted at the start, middle and end of the bottleneck."""

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True), dilated_blocks_n=0, dilated_blocks_n_start=0, dilated_blocks_n_middle=0, add_out_act=True, max_features=1024, is_resblock_depthwise=False, ffc_positions=None, ffc_kwargs={}, dilation=1, second_dilation=None, dilation_block_kind='simple', multidilation_kwargs={}):
        assert (n_blocks >= 0)
        super().__init__()
        conv_layer = get_conv_block_ctor(conv_kind)
        norm_layer = get_norm_layer(norm_layer)
        if (affine is not None):
            norm_layer = partial(norm_layer, affine=affine)
        up_norm_layer = get_norm_layer(up_norm_layer)
        if (affine is not None):
            up_norm_layer = partial(up_norm_layer, affine=affine)
        if (ffc_positions is not None):
            # Counter allows the same bottleneck index to host several FFC blocks.
            ffc_positions = collections.Counter(ffc_positions)
        model = [nn.ReflectionPad2d(3), conv_layer(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
        identity = Identity()  # NOTE(review): unused local, kept as-is
        # Stride-2 downsampling, doubling channels (capped at max_features).
        for i in range(n_downsampling):
            mult = (2 ** i)
            model += [conv_layer(min(max_features, (ngf * mult)), min(max_features, ((ngf * mult) * 2)), kernel_size=3, stride=2, padding=1), norm_layer(min(max_features, ((ngf * mult) * 2))), activation]
        mult = (2 ** n_downsampling)
        feats_num_bottleneck = min(max_features, (ngf * mult))
        # Shared kwargs for the optional dilated blocks built by make_dil_blocks.
        dilated_block_kwargs = dict(dim=feats_num_bottleneck, padding_type=padding_type, activation=activation, norm_layer=norm_layer)
        if (dilation_block_kind == 'simple'):
            dilated_block_kwargs['conv_kind'] = conv_kind
        elif (dilation_block_kind == 'multi'):
            dilated_block_kwargs['conv_layer'] = functools.partial(get_conv_block_ctor('multidilated'), **multidilation_kwargs)
        # Optional dilated blocks before the main bottleneck.
        if ((dilated_blocks_n_start is not None) and (dilated_blocks_n_start > 0)):
            model += make_dil_blocks(dilated_blocks_n_start, dilation_block_kind, dilated_block_kwargs)
        for i in range(n_blocks):
            # Optional dilated blocks injected halfway through the bottleneck.
            if ((i == (n_blocks // 2)) and (dilated_blocks_n_middle is not None) and (dilated_blocks_n_middle > 0)):
                model += make_dil_blocks(dilated_blocks_n_middle, dilation_block_kind, dilated_block_kwargs)
            if ((ffc_positions is not None) and (i in ffc_positions)):
                for _ in range(ffc_positions[i]):
                    model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU, inline=True, **ffc_kwargs)]
            if is_resblock_depthwise:
                # groups == channels makes the residual convs depthwise.
                resblock_groups = feats_num_bottleneck
            else:
                resblock_groups = 1
            model += [ResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation=activation, norm_layer=norm_layer, conv_kind=conv_kind, groups=resblock_groups, dilation=dilation, second_dilation=second_dilation)]
        # Optional dilated blocks after the main bottleneck.
        if ((dilated_blocks_n is not None) and (dilated_blocks_n > 0)):
            model += make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs)
        # Transpose-conv upsampling back to the input resolution.
        for i in range(n_downsampling):
            mult = (2 ** (n_downsampling - i))
            model += [nn.ConvTranspose2d(min(max_features, (ngf * mult)), min(max_features, int(((ngf * mult) / 2))), kernel_size=3, stride=2, padding=1, output_padding=1), up_norm_layer(min(max_features, int(((ngf * mult) / 2)))), up_activation]
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation(('tanh' if (add_out_act is True) else add_out_act)))
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
class GlobalGeneratorGated(GlobalGenerator):
    """GlobalGenerator preset using gated convolutions; the gated conv already
    folds in normalization and activation, so both are set to identity."""

    def __init__(self, *args, **kwargs):
        preset = dict(conv_kind='gated_bn_relu', activation=nn.Identity(), norm_layer=nn.Identity)
        # Explicit caller kwargs win over the preset.
        preset.update(kwargs)
        super().__init__(*args, **preset)
class GlobalGeneratorFromSuperChannels(nn.Module):
    """Global generator whose per-stage channel counts come from a NAS-style
    'super channels' description rather than a fixed ngf schedule."""

    def __init__(self, input_nc, output_nc, n_downsampling, n_blocks, super_channels, norm_layer='bn', padding_type='reflect', add_out_act=True):
        super().__init__()
        self.n_downsampling = n_downsampling
        norm_layer = get_norm_layer(norm_layer)
        # InstanceNorm has no affine params by default, so convs need a bias.
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)
        channels = self.convert_super_channels(super_channels)
        self.channels = channels
        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, channels[0], kernel_size=7, padding=0, bias=use_bias), norm_layer(channels[0]), nn.ReLU(True)]
        for i in range(n_downsampling):
            mult = (2 ** i)
            model += [nn.Conv2d(channels[(0 + i)], channels[(1 + i)], kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(channels[(1 + i)]), nn.ReLU(True)]
        mult = (2 ** n_downsampling)
        # Bottleneck split into three thirds, each with its own channel width;
        # the first block of a new width gets a 1x1 in-projection (`in_dim`).
        n_blocks1 = (n_blocks // 3)
        n_blocks2 = n_blocks1
        n_blocks3 = ((n_blocks - n_blocks1) - n_blocks2)
        for i in range(n_blocks1):
            c = n_downsampling
            dim = channels[c]
            model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer)]
        for i in range(n_blocks2):
            c = (n_downsampling + 1)
            dim = channels[c]
            kwargs = {}
            if (i == 0):
                kwargs = {'in_dim': channels[(c - 1)]}
            model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer, **kwargs)]
        for i in range(n_blocks3):
            c = (n_downsampling + 2)
            dim = channels[c]
            kwargs = {}
            if (i == 0):
                kwargs = {'in_dim': channels[(c - 1)]}
            model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer, **kwargs)]
        for i in range(n_downsampling):
            mult = (2 ** (n_downsampling - i))
            model += [nn.ConvTranspose2d(channels[((n_downsampling + 3) + i)], channels[(((n_downsampling + 3) + i) + 1)], kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), norm_layer(channels[(((n_downsampling + 3) + i) + 1)]), nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(channels[((2 * n_downsampling) + 3)], output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation(('tanh' if (add_out_act is True) else add_out_act)))
        self.model = nn.Sequential(*model)

    def convert_super_channels(self, super_channels):
        """Flatten the super-channel spec into a concrete per-layer channel list:
        downsampling widths, then 3 bottleneck widths, then upsampling widths."""
        n_downsampling = self.n_downsampling
        result = []
        cnt = 0
        # N1 = number of search-space layers covering stem + downsampling.
        if (n_downsampling == 2):
            N1 = 10
        elif (n_downsampling == 3):
            N1 = 13
        else:
            raise NotImplementedError
        for i in range(0, N1):
            if (i in [1, 4, 7, 10]):
                channel = (super_channels[cnt] * (2 ** cnt))
                config = {'channel': channel}
                result.append(channel)
                logging.info(f'Downsample channels {result[(- 1)]}')
                cnt += 1
        # One width per bottleneck third (each third spans 3 search-space layers).
        for i in range(3):
            for (counter, j) in enumerate(range((N1 + (i * 3)), ((N1 + 3) + (i * 3)))):
                if (len(super_channels) == 6):
                    channel = (super_channels[3] * 4)
                else:
                    channel = (super_channels[(i + 3)] * 4)
                config = {'channel': channel}
                if (counter == 0):
                    result.append(channel)
                    logging.info(f'Bottleneck channels {result[(- 1)]}')
        cnt = 2
        # NOTE(review): the indices [22, 25, 28] only line up with N1 == 13
        # (n_downsampling == 3); for n_downsampling == 2 the loop range is
        # 19..30, which still hits them, but the alignment with the search
        # space should be confirmed against the original layout.
        for i in range((N1 + 9), (N1 + 21)):
            if (i in [22, 25, 28]):
                cnt -= 1
                if (len(super_channels) == 6):
                    channel = (super_channels[(5 - cnt)] * (2 ** cnt))
                else:
                    channel = (super_channels[(7 - cnt)] * (2 ** cnt))
                result.append(int(channel))
                logging.info(f'Upsample channels {result[(- 1)]}')
        return result

    def forward(self, input):
        return self.model(input)
class NLayerDiscriminator(BaseDiscriminator):
    """PatchGAN discriminator (pix2pixHD style) exposing per-stage activations
    for feature-matching losses."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        super().__init__()
        self.n_layers = n_layers
        kw = 4
        padw = int(np.ceil((kw - 1.0) / 2))

        stages = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                   nn.LeakyReLU(0.2, True)]]
        nf = ndf
        # Stride-2 stages, doubling channels up to 512.
        for n in range(1, n_layers):
            nf_prev, nf = nf, min(nf * 2, 512)
            stages.append([nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
                           norm_layer(nf),
                           nn.LeakyReLU(0.2, True)])
        # One stride-1 stage, then the 1-channel score head.
        nf_prev, nf = nf, min(nf * 2, 512)
        stages.append([nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
                       norm_layer(nf),
                       nn.LeakyReLU(0.2, True)])
        stages.append([nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)])

        # Register stages as model0..modelN so activations can be read per stage.
        for idx, stage in enumerate(stages):
            setattr(self, 'model' + str(idx), nn.Sequential(*stage))

    def get_all_activations(self, x):
        """Return the output of every stage, in order (input excluded)."""
        acts = [x]
        for idx in range(self.n_layers + 2):
            stage = getattr(self, 'model' + str(idx))
            acts.append(stage(acts[-1]))
        return acts[1:]

    def forward(self, x):
        acts = self.get_all_activations(x)
        return (acts[-1], acts[:-1])
class MultidilatedNLayerDiscriminator(BaseDiscriminator):
    """Variant of :class:`NLayerDiscriminator` whose intermediate downsampling
    stages use the project's ``MultidilatedConv`` instead of plain ``nn.Conv2d``.

    The stage-registration scheme (``model0`` .. ``model{n_layers+1}``) and the
    ``forward`` contract are identical to ``NLayerDiscriminator``.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, multidilation_kwargs={}):
        # NOTE(review): mutable default for multidilation_kwargs — it is only
        # unpacked, never mutated here, so the shared-default pitfall is benign.
        super().__init__()
        self.n_layers = n_layers
        kw = 4
        padw = int(np.ceil(((kw - 1.0) / 2)))
        # Stage 0: ordinary conv + LeakyReLU, no normalization.
        sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]
        nf = ndf
        # Intermediate stages: multi-dilated convolutions; padding=[2, 3] is a
        # per-branch padding list consumed by MultidilatedConv (project type).
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min((nf * 2), 512)
            cur_model = []
            cur_model += [MultidilatedConv(nf_prev, nf, kernel_size=kw, stride=2, padding=[2, 3], **multidilation_kwargs), norm_layer(nf), nn.LeakyReLU(0.2, True)]
            sequence.append(cur_model)
        # Penultimate stage: plain stride-1 conv.
        nf_prev = nf
        nf = min((nf * 2), 512)
        cur_model = []
        cur_model += [nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), norm_layer(nf), nn.LeakyReLU(0.2, True)]
        sequence.append(cur_model)
        # Final 1-channel patch score map.
        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
        for n in range(len(sequence)):
            setattr(self, ('model' + str(n)), nn.Sequential(*sequence[n]))

    def get_all_activations(self, x):
        """Run all stages sequentially and return every stage's output (input excluded)."""
        res = [x]
        for n in range((self.n_layers + 2)):
            model = getattr(self, ('model' + str(n)))
            res.append(model(res[(- 1)]))
        return res[1:]

    def forward(self, x):
        """Return ``(final_score_map, list_of_intermediate_activations)``."""
        act = self.get_all_activations(x)
        return (act[(- 1)], act[:(- 1)])
class NLayerDiscriminatorAsGen(NLayerDiscriminator):
    """Adapter exposing an NLayerDiscriminator through a generator-like
    single-output ``forward`` (drops the intermediate activations)."""

    def forward(self, x):
        # Parent returns (score_map, intermediate_activations); keep the score map only.
        score_map, _ = super().forward(x)
        return score_map
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Globally average-pools the input to one value per channel, runs a small
    bottleneck MLP with a sigmoid, and rescales each channel of the input
    by the resulting (0, 1) gate.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, (channel // reduction), bias=False),
            nn.ReLU(inplace=True),
            nn.Linear((channel // reduction), channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        # Squeeze: global spatial average -> (B, C)
        squeezed = self.avg_pool(x).view(batch, channels)
        # Excite: per-channel gates in (0, 1), broadcast over H and W
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * gates.expand_as(x)
def get_training_model_class(kind):
    """Resolve a training-module class by its registry name.

    Args:
        kind: registry key; only 'default' is supported.

    Raises:
        ValueError: for any unknown kind.
    """
    if kind != 'default':
        raise ValueError(f'Unknown trainer module {kind}')
    return DefaultInpaintingTrainingModule
def make_training_model(config):
    """Instantiate the training module described by ``config.training_model``.

    The remaining ``training_model`` entries (minus 'kind') are forwarded as
    keyword arguments; ``use_ddp`` is derived from the trainer accelerator.
    """
    kwargs = dict(config.training_model)
    kind = kwargs.pop('kind')
    kwargs['use_ddp'] = (config.trainer.kwargs.get('accelerator', None) == 'ddp')
    logging.info(f'Make training model {kind}')
    cls = get_training_model_class(kind)
    return cls(config, **kwargs)
def load_checkpoint(train_config, path, map_location='cuda', strict=True):
    """Build the training model from config and restore weights from ``path``.

    Args:
        train_config: full training config (passed to make_training_model).
        path: checkpoint file path.
        map_location: device mapping for torch.load (defaults to 'cuda').
        strict: forwarded to load_state_dict.
    """
    model: torch.nn.Module = make_training_model(train_config)
    checkpoint = torch.load(path, map_location=map_location)
    model.load_state_dict(checkpoint['state_dict'], strict=strict)
    # Let the Lightning module restore any extra state it saved.
    model.on_load_checkpoint(checkpoint)
    return model
def make_optimizer(parameters, kind='adamw', **kwargs):
    """Create a torch optimizer over ``parameters``.

    Args:
        parameters: iterable of parameters (or param groups).
        kind: 'adam' or 'adamw'.
        **kwargs: forwarded to the optimizer constructor (lr, betas, ...).

    Raises:
        ValueError: for an unknown optimizer kind.
    """
    optimizer_classes = {'adam': torch.optim.Adam, 'adamw': torch.optim.AdamW}
    try:
        optimizer_class = optimizer_classes[kind]
    except KeyError:
        raise ValueError(f'Unknown optimizer kind {kind}') from None
    return optimizer_class(parameters, **kwargs)
def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999):
    """In-place EMA update: result <- decay * result + (1 - decay) * new_iterate_model.

    Assumes both modules expose the same parameter names — TODO confirm at call sites.
    """
    with torch.no_grad():
        new_params = dict(new_iterate_model.named_parameters())
        for name, param in result.named_parameters():
            param.data.mul_(decay).add_(new_params[name].data, alpha=(1 - decay))
def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'):
    """Generate ``scales`` channels of Gaussian noise at progressively halved
    resolutions, each upsampled back to the base spatial size.

    Returns a tensor of shape (batch, scales, H, W) on base_tensor's device.
    """
    batch_size, _, height, width = base_tensor.shape
    cur_height, cur_width = height, width
    # align_corners is only a valid argument for these interpolation modes.
    align_corners = False if scale_mode in ('bilinear', 'bicubic') else None
    noise_maps = []
    for _ in range(scales):
        sample = torch.randn(batch_size, 1, cur_height, cur_width, device=base_tensor.device)
        noise_maps.append(
            F.interpolate(sample, size=(height, width), mode=scale_mode, align_corners=align_corners)
        )
        cur_height //= 2
        cur_width //= 2
    return torch.cat(noise_maps, dim=1)
class BaseInpaintingTrainingModule(ptl.LightningModule):
    """Base Lightning module for inpainting training.

    Owns the generator, and (unless ``predict_only``) the discriminator,
    losses, visualizer and evaluators. Subclasses implement ``forward``,
    ``generator_loss`` and ``discriminator_loss``.
    """

    def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100,
                 average_generator=False, generator_avg_beta=0.999,
                 average_generator_start_step=30000, average_generator_period=10,
                 store_discr_outputs_for_vis=False, **kwargs):
        super().__init__(*args, **kwargs)
        LOGGER.info('BaseInpaintingTrainingModule init called')
        self.config = config
        self.generator = make_generator(config, **self.config.generator)
        self.use_ddp = use_ddp
        if (not get_has_ddp_rank()):
            LOGGER.info(f'''Generator
{self.generator}''')
        if (not predict_only):
            # Full training setup: discriminator, losses, visualizer, evaluators.
            self.save_hyperparameters(self.config)
            self.discriminator = make_discriminator(**self.config.discriminator)
            self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial)
            self.visualizer = make_visualizer(**self.config.visualizer)
            self.val_evaluator = make_evaluator(**self.config.evaluator)
            self.test_evaluator = make_evaluator(**self.config.evaluator)
            if (not get_has_ddp_rank()):
                LOGGER.info(f'''Discriminator
{self.discriminator}''')
            # Optional extra validation datasets, each with its own evaluator.
            extra_val = self.config.data.get('extra_val', ())
            if extra_val:
                self.extra_val_titles = list(extra_val)
                self.extra_evaluators = nn.ModuleDict({k: make_evaluator(**self.config.evaluator) for k in extra_val})
            else:
                self.extra_evaluators = {}
            # Optional EMA ("averaged") copy of the generator for evaluation.
            self.average_generator = average_generator
            self.generator_avg_beta = generator_avg_beta
            self.average_generator_start_step = average_generator_start_step
            self.average_generator_period = average_generator_period
            self.generator_average = None
            self.last_generator_averaging_step = (- 1)
            self.store_discr_outputs_for_vis = store_discr_outputs_for_vis
            # Instantiate only the losses with nonzero weight in the config.
            if (self.config.losses.get('l1', {'weight_known': 0})['weight_known'] > 0):
                self.loss_l1 = nn.L1Loss(reduction='none')
            if (self.config.losses.get('mse', {'weight': 0})['weight'] > 0):
                self.loss_mse = nn.MSELoss(reduction='none')
            if (self.config.losses.perceptual.weight > 0):
                self.loss_pl = PerceptualLoss()
            if (self.config.losses.get('resnet_pl', {'weight': 0})['weight'] > 0):
                self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl)
            else:
                self.loss_resnet_pl = None
        self.visualize_each_iters = visualize_each_iters
        LOGGER.info('BaseInpaintingTrainingModule init done')

    def configure_optimizers(self):
        """Two optimizers: index 0 for the generator, index 1 for the discriminator."""
        discriminator_params = list(self.discriminator.parameters())
        return [dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)),
                dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator))]

    def train_dataloader(self):
        kwargs = dict(self.config.data.train)
        if self.use_ddp:
            kwargs['ddp_kwargs'] = dict(num_replicas=(self.trainer.num_nodes * self.trainer.num_processes),
                                        rank=self.trainer.global_rank, shuffle=True)
        # NOTE(review): 'kwargs' (including ddp_kwargs) is built but NOT passed
        # below — the call uses the raw config instead. Looks like a bug; verify
        # whether make_default_train_dataloader handles DDP some other way.
        dataloader = make_default_train_dataloader(**self.config.data.train)
        return dataloader

    def val_dataloader(self):
        # Loader 0 = 'val'; loader 1 = 'test' (visual_test, or val duplicated
        # when visual_test is absent); loaders 2+ = extra_val datasets.
        res = [make_default_val_dataloader(**self.config.data.val)]
        if (self.config.data.visual_test is not None):
            res = (res + [make_default_val_dataloader(**self.config.data.visual_test)])
        else:
            res = (res + res)
        extra_val = self.config.data.get('extra_val', ())
        if extra_val:
            res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles]
        return res

    def training_step(self, batch, batch_idx, optimizer_idx=None):
        self._is_training_step = True
        return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx)

    def validation_step(self, batch, batch_idx, dataloader_idx):
        # Map dataloader index to mode; indexes >= 2 are extra-val datasets.
        extra_val_key = None
        if (dataloader_idx == 0):
            mode = 'val'
        elif (dataloader_idx == 1):
            mode = 'test'
        else:
            mode = 'extra_val'
            extra_val_key = self.extra_val_titles[(dataloader_idx - 2)]
        self._is_training_step = False
        return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key)

    def training_step_end(self, batch_parts_outputs):
        # Periodically refresh the EMA generator copy once past the start step.
        if (self.training and self.average_generator
                and (self.global_step >= self.average_generator_start_step)
                and (self.global_step >= (self.last_generator_averaging_step + self.average_generator_period))):
            if (self.generator_average is None):
                self.generator_average = copy.deepcopy(self.generator)
            else:
                update_running_average(self.generator_average, self.generator, decay=self.generator_avg_beta)
            self.last_generator_averaging_step = self.global_step
        # Reduce multi-GPU partial losses to one scalar; keep it differentiable.
        full_loss = (batch_parts_outputs['loss'].mean()
                     if torch.is_tensor(batch_parts_outputs['loss'])
                     else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True))
        log_info = {k: v.mean() for (k, v) in batch_parts_outputs['log_info'].items()}
        self.log_dict(log_info, on_step=True, on_epoch=False)
        return full_loss

    def validation_epoch_end(self, outputs):
        # Flatten per-dataloader output groups into one list of step dicts.
        outputs = [step_out for out_group in outputs for step_out in out_group]
        averaged_logs = average_dicts((step_out['log_info'] for step_out in outputs))
        self.log_dict({k: v.mean() for (k, v) in averaged_logs.items()})
        pd.set_option('display.max_columns', 500)
        pd.set_option('display.width', 1000)
        # Standard validation metrics.
        val_evaluator_states = [s['val_evaluator_state'] for s in outputs if ('val_evaluator_state' in s)]
        val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states)
        val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0)
        val_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
        LOGGER.info(f'''Validation metrics after epoch #{self.current_epoch}, total {self.global_step} iterations:
{val_evaluator_res_df}''')
        for (k, v) in flatten_dict(val_evaluator_res).items():
            self.log(f'val_{k}', v)
        # Visual-test ("test") metrics.
        test_evaluator_states = [s['test_evaluator_state'] for s in outputs if ('test_evaluator_state' in s)]
        test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states)
        test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0)
        test_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
        LOGGER.info(f'''Test metrics after epoch #{self.current_epoch}, total {self.global_step} iterations:
{test_evaluator_res_df}''')
        for (k, v) in flatten_dict(test_evaluator_res).items():
            self.log(f'test_{k}', v)
        # Extra validation datasets, one evaluator each.
        if self.extra_evaluators:
            for (cur_eval_title, cur_evaluator) in self.extra_evaluators.items():
                cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state'
                cur_states = [s[cur_state_key] for s in outputs if (cur_state_key in s)]
                cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states)
                cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0)
                cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
                LOGGER.info(f'''Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, total {self.global_step} iterations:
{cur_evaluator_res_df}''')
                for (k, v) in flatten_dict(cur_evaluator_res).items():
                    self.log(f'extra_val_{cur_eval_title}_{k}', v)

    def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None):
        """Shared body of training/validation steps.

        optimizer_idx 0 = generator step, 1 = discriminator step,
        None = validation (generator loss only).
        """
        if (optimizer_idx == 0):
            set_requires_grad(self.generator, True)
            set_requires_grad(self.discriminator, False)
        elif (optimizer_idx == 1):
            set_requires_grad(self.generator, False)
            set_requires_grad(self.discriminator, True)
        batch = self(batch)
        total_loss = 0
        metrics = {}
        if ((optimizer_idx is None) or (optimizer_idx == 0)):
            (total_loss, metrics) = self.generator_loss(batch)
        elif ((optimizer_idx is None) or (optimizer_idx == 1)):
            if (self.config.losses.adversarial.weight > 0):
                (total_loss, metrics) = self.discriminator_loss(batch)
        # Periodic visualization on rank 0 (or single-process runs).
        if ((self.get_ddp_rank() in (None, 0)) and (((batch_idx % self.visualize_each_iters) == 0) or (mode == 'test'))):
            if (self.config.losses.adversarial.weight > 0):
                if self.store_discr_outputs_for_vis:
                    with torch.no_grad():
                        self.store_discr_outputs(batch)
            vis_suffix = f'_{mode}'
            if (mode == 'extra_val'):
                vis_suffix += f'_{extra_val_key}'
            self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix)
        metrics_prefix = f'{mode}_'
        if (mode == 'extra_val'):
            metrics_prefix += f'{extra_val_key}_'
        result = dict(loss=total_loss, log_info=add_prefix_to_keys(metrics, metrics_prefix))
        # Attach evaluator state so *_epoch_end can aggregate metrics.
        if (mode == 'val'):
            result['val_evaluator_state'] = self.val_evaluator.process_batch(batch)
        elif (mode == 'test'):
            result['test_evaluator_state'] = self.test_evaluator.process_batch(batch)
        elif (mode == 'extra_val'):
            result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch)
        return result

    def get_current_generator(self, no_average=False):
        """Return the EMA generator during eval when available, else the live one."""
        if ((not no_average) and (not self.training) and self.average_generator and (self.generator_average is not None)):
            return self.generator_average
        return self.generator

    def forward(self, batch: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
        """Pass data through generator and obtain at least 'predicted_image' and 'inpainted' keys."""
        raise NotImplementedError()

    def generator_loss(self, batch) -> Tuple[(torch.Tensor, Dict[(str, torch.Tensor)])]:
        """Return (total generator loss, per-term metrics). Implemented by subclasses."""
        raise NotImplementedError()

    def discriminator_loss(self, batch) -> Tuple[(torch.Tensor, Dict[(str, torch.Tensor)])]:
        """Return (total discriminator loss, per-term metrics). Implemented by subclasses."""
        raise NotImplementedError()

    def store_discr_outputs(self, batch):
        """Upsample discriminator score maps to image size and stash them in the batch for visualization."""
        out_size = batch['image'].shape[2:]
        (discr_real_out, _) = self.discriminator(batch['image'])
        (discr_fake_out, _) = self.discriminator(batch['predicted_image'])
        batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest')
        batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest')
        batch['discr_output_diff'] = (batch['discr_output_real'] - batch['discr_output_fake'])

    def get_ddp_rank(self):
        # None in single-process runs, else the global rank.
        return (self.trainer.global_rank if ((self.trainer.num_nodes * self.trainer.num_processes) > 1) else None)
def make_visualizer(kind, **kwargs):
    """Factory for visualizers: 'directory' writes jpgs, 'noop' does nothing.

    Raises:
        ValueError: for an unknown kind.
    """
    logging.info(f'Make visualizer {kind}')
    if (kind != 'directory') and (kind != 'noop'):
        raise ValueError(f'Unknown visualizer kind {kind}')
    if kind == 'directory':
        return DirectoryVisualizer(**kwargs)
    return NoopVisualizer()
class BaseVisualizer:
    """Abstract interface for batch visualizers."""

    @abc.abstractmethod
    def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
        """Take a batch, build an image from it and visualize it somewhere."""
        raise NotImplementedError()
def visualize_mask_and_images(images_dict: Dict[(str, np.ndarray)], keys: List[str],
                              last_without_mask=True, rescale_keys=None,
                              mask_only_first=None, black_mask=False) -> np.ndarray:
    """Lay out the images named by ``keys`` side by side with the mask boundary drawn.

    Each image is CHW and converted to HWC; 1-channel images are tiled to RGB,
    >3-channel maps are argmax'd and colorized as label maps.
    Returns one HWC float image (panels concatenated horizontally).
    """
    mask = (images_dict['mask'] > 0.5)
    panels = []
    for (idx, key) in enumerate(keys):
        img = np.transpose(images_dict[key], (1, 2, 0))
        # Optional min-max rescale for keys whose range is not [0, 1].
        if (rescale_keys is not None) and (key in rescale_keys):
            img = (img - img.min())
            img /= (img.max() + 1e-05)
        if img.ndim == 2:
            img = np.expand_dims(img, 2)
        if img.shape[2] == 1:
            img = np.repeat(img, 3, axis=2)
        elif img.shape[2] > 3:
            # Multi-channel map: treat as per-pixel class scores.
            img = color.label2rgb(img.argmax(2), colors=COLORS)
        # Decide whether to overlay the mask boundary on this panel.
        if mask_only_first:
            draw_boundary = (idx == 0)
        else:
            draw_boundary = (idx < (len(keys) - 1)) or (not last_without_mask)
        if draw_boundary:
            if black_mask:
                img = (img * (1 - mask[0][(..., None)]))
            img = mark_boundaries(img, mask[0], color=(1.0, 0.0, 0.0), outline_color=(1.0, 1.0, 1.0), mode='thick')
        panels.append(img)
    return np.concatenate(panels, axis=1)
def visualize_mask_and_images_batch(batch: Dict[(str, torch.Tensor)], keys: List[str], max_items=10,
                                    last_without_mask=True, rescale_keys=None) -> np.ndarray:
    """Visualize up to ``max_items`` batch elements, stacked vertically.

    Only tensors named in ``keys`` (plus 'mask') are moved to CPU numpy.
    """
    arrays = {name: tens.detach().cpu().numpy()
              for (name, tens) in batch.items()
              if (name in keys) or (name == 'mask')}
    batch_size = next(iter(arrays.values())).shape[0]
    rows = []
    for item_i in range(min(batch_size, max_items)):
        single = {name: arr[item_i] for (name, arr) in arrays.items()}
        rows.append(visualize_mask_and_images(single, keys,
                                              last_without_mask=last_without_mask,
                                              rescale_keys=rescale_keys))
    return np.concatenate(rows, axis=0)
def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False):
    """Create a random colormap for segmentation visualization.

    :param nlabels: Number of labels (size of colormap)
    :param type: 'bright' for strong colors, 'soft' for pastel colors
    :param first_color_black: Option to use first color as black, True or False
    :param last_color_black: Option to use last color as black, True or False
    :param verbose: Prints the number of labels and shows the colormap. True or False
    :return: (list of RGB triples, matplotlib colormap), or None for a bad ``type``
    """
    if (type not in ('bright', 'soft')):
        print('Please choose "bright" or "soft" for type')
        return
    if verbose:
        print(('Number of labels: ' + str(nlabels)))
    if (type == 'bright'):
        # Sample in HSV (high value, moderate-to-high saturation), then convert.
        # NB: uniform() is called in h, s, v order per color — same RNG stream
        # as sampling all HSV triples first.
        randRGBcolors = [colorsys.hsv_to_rgb(np.random.uniform(low=0.0, high=1),
                                             np.random.uniform(low=0.2, high=1),
                                             np.random.uniform(low=0.9, high=1))
                         for _ in range(nlabels)]
    else:
        # Pastel palette: all RGB channels in a narrow bright band.
        low, high = 0.6, 0.95
        randRGBcolors = [(np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high))
                         for _ in range(nlabels)]
    if first_color_black:
        randRGBcolors[0] = [0, 0, 0]
    if last_color_black:
        randRGBcolors[(- 1)] = [0, 0, 0]
    random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
    if verbose:
        # Render the colormap as a horizontal colorbar for inspection.
        from matplotlib import colors, colorbar
        from matplotlib import pyplot as plt
        (fig, ax) = plt.subplots(1, 1, figsize=(15, 0.5))
        bounds = np.linspace(0, nlabels, (nlabels + 1))
        norm = colors.BoundaryNorm(bounds, nlabels)
        cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional',
                                   ticks=None, boundaries=bounds, format='%1i', orientation=u'horizontal')
    return (randRGBcolors, random_colormap)
class DirectoryVisualizer(BaseVisualizer):
    """Visualizer that renders batches to jpg files under ``outdir``.

    Files land in ``outdir/epoch{E}{suffix}/batch{B}[_r{rank}].jpg``.
    """

    DEFAULT_KEY_ORDER = 'image predicted_image inpainted'.split(' ')

    def __init__(self, outdir, key_order=DEFAULT_KEY_ORDER, max_items_in_batch=10,
                 last_without_mask=True, rescale_keys=None):
        self.outdir = outdir
        os.makedirs(self.outdir, exist_ok=True)
        self.key_order = key_order
        self.max_items_in_batch = max_items_in_batch
        self.last_without_mask = last_without_mask
        self.rescale_keys = rescale_keys

    def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
        # Warn (not fail) if target images escape the expected [0, 1] range.
        check_and_warn_input_range(batch['image'], 0, 1, 'DirectoryVisualizer target image')
        vis_img = visualize_mask_and_images_batch(batch, self.key_order,
                                                  max_items=self.max_items_in_batch,
                                                  last_without_mask=self.last_without_mask,
                                                  rescale_keys=self.rescale_keys)
        vis_img = np.clip((vis_img * 255), 0, 255).astype('uint8')
        curoutdir = os.path.join(self.outdir, f'epoch{epoch_i:04d}{suffix}')
        os.makedirs(curoutdir, exist_ok=True)
        rank_suffix = f'_r{rank}' if (rank is not None) else ''
        out_fname = os.path.join(curoutdir, f'batch{batch_i:07d}{rank_suffix}.jpg')
        # cv2 expects BGR channel order on disk writes.
        vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(out_fname, vis_img)
class NoopVisualizer(BaseVisualizer):
    """Visualizer that accepts any arguments and does nothing."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
        pass
def check_and_warn_input_range(tensor, min_value, max_value, name):
    """Emit a warning (never raise) when tensor values leave [min_value, max_value]."""
    lo = tensor.min()
    hi = tensor.max()
    if (lo < min_value) or (hi > max_value):
        warnings.warn(f'{name} must be in {min_value}..{max_value} range, but it ranges {lo}..{hi}')
def sum_dict_with_prefix(target, cur_dict, prefix, default=0):
    """Accumulate cur_dict's values into target in place, under prefixed keys.

    Missing keys in target start from ``default``.
    """
    for key, value in cur_dict.items():
        prefixed = prefix + key
        target[prefixed] = target.get(prefixed, default) + value
def average_dicts(dict_list):
    """Key-wise average over a sequence of dicts.

    NOTE(review): the denominator deliberately starts at 0.001 (not 0) —
    this avoids ZeroDivisionError on an empty input at the cost of a tiny
    downward bias; preserved as-is for metric parity.
    """
    totals = {}
    count = 0.001
    for dct in dict_list:
        sum_dict_with_prefix(totals, dct, '')
        count += 1
    for key in list(totals):
        totals[key] /= count
    return totals
def add_prefix_to_keys(dct, prefix):
    """Return a shallow copy of dct with ``prefix`` prepended to every key."""
    result = {}
    for key, value in dct.items():
        result[prefix + key] = value
    return result
def set_requires_grad(module, value):
    """Enable or disable gradient tracking on every parameter of ``module``."""
    for parameter in module.parameters():
        parameter.requires_grad = value
def flatten_dict(dct):
    """Recursively flatten nested dicts.

    Tuple keys are joined with '_'; nested keys become 'outer_inner'.
    """
    flat = {}
    for key, value in dct.items():
        if isinstance(key, tuple):
            key = '_'.join(key)
        if isinstance(value, dict):
            for sub_key, sub_value in flatten_dict(value).items():
                flat[f'{key}_{sub_key}'] = sub_value
        else:
            flat[key] = value
    return flat
class LinearRamp:
    """Linear interpolation schedule: start_value before start_iter,
    end_value from end_iter on, linear in between."""

    def __init__(self, start_value=0, end_value=1, start_iter=(- 1), end_iter=0):
        self.start_value = start_value
        self.end_value = end_value
        self.start_iter = start_iter
        self.end_iter = end_iter

    def __call__(self, i):
        # Clamp outside the ramp window.
        if i < self.start_iter:
            return self.start_value
        if i >= self.end_iter:
            return self.end_value
        t = (i - self.start_iter) / (self.end_iter - self.start_iter)
        return ((self.start_value * (1 - t)) + (self.end_value * t))
class LadderRamp:
    """Step schedule: returns values[k] where k counts how many start_iters
    are <= i (so len(values) == len(start_iters) + 1)."""

    def __init__(self, start_iters, values):
        self.start_iters = start_iters
        self.values = values
        assert len(values) == (len(start_iters) + 1), (len(values), len(start_iters))

    def __call__(self, i):
        # bisect_right: an iteration exactly at a boundary uses the NEXT value.
        segment = bisect.bisect_right(self.start_iters, i)
        return self.values[segment]
def get_ramp(kind='ladder', **kwargs):
    """Factory for ramp schedules ('linear' or 'ladder').

    Raises:
        ValueError: for any other kind.
    """
    if kind == 'linear':
        return LinearRamp(**kwargs)
    elif kind == 'ladder':
        return LadderRamp(**kwargs)
    raise ValueError(f'Unexpected ramp kind: {kind}')
def print_traceback_handler(sig, frame):
    """Signal handler that logs the current Python stack (for debugging hangs)."""
    LOGGER.warning(f'Received signal {sig}')
    stack_text = ''.join(traceback.format_stack())
    LOGGER.warning(f'''Requested stack trace:
{stack_text}''')
def register_debug_signal_handlers(sig=signal.SIGUSR1, handler=print_traceback_handler):
    # Install a debug handler (default: dump the stack on SIGUSR1) so a
    # running training job can be inspected with `kill -USR1 <pid>`.
    LOGGER.warning(f'Setting signal {sig} handler {handler}')
    signal.signal(sig, handler)
def handle_deterministic_config(config):
    """Seed all RNGs when the config carries a 'seed'; return True iff seeding happened."""
    seed = dict(config).get('seed', None)
    if seed is not None:
        seed_everything(seed)
        return True
    return False
def get_shape(t):
    """Recursively describe a nested structure: tensors become shape tuples,
    numbers become their type, dicts/lists/tuples are mapped element-wise.

    Raises:
        ValueError: for any unsupported leaf type.
    """
    if torch.is_tensor(t):
        return tuple(t.shape)
    if isinstance(t, dict):
        return {name: get_shape(item) for name, item in t.items()}
    if isinstance(t, (list, tuple)):
        return [get_shape(item) for item in t]
    if isinstance(t, numbers.Number):
        return type(t)
    raise ValueError('unexpected type {}'.format(type(t)))
def get_has_ddp_rank():
    """Return True when any torch-distributed launcher env var is set,
    i.e. this process appears to be a DDP worker."""
    ddp_env_vars = ('MASTER_PORT', 'NODE_RANK', 'LOCAL_RANK', 'WORLD_SIZE')
    return any(os.environ.get(name) is not None for name in ddp_env_vars)
def handle_ddp_subprocess():
    """Decorator factory for hydra entry points launched as DDP children.

    A child process (detected via TRAINING_PARENT_WORK_DIR) is redirected
    into the parent's hydra work dir by appending a hydra.run.dir override.
    """
    def decorator(main_func):
        @functools.wraps(main_func)
        def wrapped(*args, **kwargs):
            parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
            has_parent = parent_cwd is not None
            has_rank = get_has_ddp_rank()
            assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'
            if has_parent:
                # Re-run hydra inside the parent's working directory.
                sys.argv.extend([f'hydra.run.dir={parent_cwd}'])
            main_func(*args, **kwargs)
        return wrapped
    return decorator
def handle_ddp_parent_process():
    """Mark this process as the DDP parent (recording its cwd in the
    environment for children) and return whether it already HAS a parent."""
    parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
    has_parent = parent_cwd is not None
    has_rank = get_has_ddp_rank()
    assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'
    if parent_cwd is None:
        # First (parent) process: advertise our work dir to future children.
        os.environ['TRAINING_PARENT_WORK_DIR'] = os.getcwd()
    return has_parent