Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- RAVE-main/annotator/lama/saicinpainting/training/data/__init__.py +0 -0
- RAVE-main/annotator/lama/saicinpainting/training/data/masks.py +332 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/__init__.py +0 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/base.py +80 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/depthwise_sep_conv.py +17 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/fake_fakes.py +47 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/ffc.py +485 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/multidilated_conv.py +98 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/multiscale.py +244 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/pix2pixhd.py +669 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/spatial_transform.py +49 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/squeeze_excitation.py +20 -0
- RAVE-main/annotator/lama/saicinpainting/training/trainers/__init__.py +29 -0
- RAVE-main/annotator/lama/saicinpainting/training/trainers/base.py +293 -0
- RAVE-main/annotator/lama/saicinpainting/training/trainers/default.py +175 -0
- RAVE-main/annotator/lama/saicinpainting/training/visualizers/__init__.py +15 -0
- RAVE-main/annotator/lama/saicinpainting/training/visualizers/base.py +73 -0
- RAVE-main/annotator/lama/saicinpainting/training/visualizers/colors.py +76 -0
- RAVE-main/annotator/lama/saicinpainting/training/visualizers/directory.py +36 -0
- RAVE-main/annotator/lama/saicinpainting/training/visualizers/noop.py +9 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/ade.py +84 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/builder.py +169 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/chase_db1.py +27 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/hrf.py +27 -0
- RAVE-main/annotator/mmpkg/mmseg/models/backbones/hrnet.py +555 -0
- RAVE-main/annotator/mmpkg/mmseg/models/necks/__init__.py +4 -0
- RAVE-main/annotator/mmpkg/mmseg/models/necks/fpn.py +212 -0
- RAVE-main/annotator/mmpkg/mmseg/models/necks/multilevel_neck.py +70 -0
- RAVE-main/annotator/uniformer/LICENSE +203 -0
- RAVE-main/annotator/uniformer/__init__.py +56 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/ade20k.py +54 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/chase_db1.py +59 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/cityscapes.py +54 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py +35 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/drive.py +59 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/hrf.py +59 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_context.py +60 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py +60 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_voc12.py +57 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_voc12_aug.py +9 -0
- RAVE-main/annotator/uniformer/configs/_base_/datasets/stare.py +59 -0
- RAVE-main/annotator/uniformer/configs/_base_/default_runtime.py +14 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/ann_r50-d8.py +46 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py +44 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py +44 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/cgnet.py +35 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/danet_r50-d8.py +44 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py +44 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py +50 -0
- RAVE-main/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py +46 -0
RAVE-main/annotator/lama/saicinpainting/training/data/__init__.py
ADDED
|
File without changes
|
RAVE-main/annotator/lama/saicinpainting/training/data/masks.py
ADDED
|
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
import random
|
| 3 |
+
import hashlib
|
| 4 |
+
import logging
|
| 5 |
+
from enum import Enum
|
| 6 |
+
|
| 7 |
+
import cv2
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
# from annotator.lama.saicinpainting.evaluation.masks.mask import SegmentationMask
|
| 11 |
+
from annotator.lama.saicinpainting.utils import LinearRamp
|
| 12 |
+
|
| 13 |
+
LOGGER = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class DrawMethod(Enum):
    """Stroke primitive used by make_random_irregular_mask for each segment."""
    LINE = 'line'
    CIRCLE = 'circle'
    SQUARE = 'square'
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def make_random_irregular_mask(shape, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10,
                               draw_method=DrawMethod.LINE):
    """Draw a random irregular mask made of chained strokes.

    :param shape: (height, width) of the mask to generate
    :param max_angle: upper bound (exclusive) for the random integer angle term, radians
    :param max_len: upper bound (exclusive) for the random extra stroke length
    :param max_width: upper bound (exclusive) for the random extra brush width
    :param min_times: minimum number of independent stroke chains
    :param max_times: maximum number of independent stroke chains (inclusive)
    :param draw_method: DrawMethod (or its value) selecting the stroke primitive
    :return: float32 array of shape (1, height, width) with values in {0, 1}
    """
    draw_method = DrawMethod(draw_method)

    height, width = shape
    mask = np.zeros((height, width), np.float32)
    times = np.random.randint(min_times, max_times + 1)
    for i in range(times):
        start_x = np.random.randint(width)
        start_y = np.random.randint(height)
        # each chain consists of 1..5 connected segments
        for j in range(1 + np.random.randint(5)):
            angle = 0.01 + np.random.randint(max_angle)
            # alternate overall direction between chains
            if i % 2 == 0:
                angle = 2 * 3.1415926 - angle
            length = 10 + np.random.randint(max_len)
            brush_w = 5 + np.random.randint(max_width)
            # NOTE(review): clip upper bound is width/height, not width-1/height-1,
            # so an endpoint may land one past the last pixel; cv2 tolerates this.
            end_x = np.clip((start_x + length * np.sin(angle)).astype(np.int32), 0, width)
            end_y = np.clip((start_y + length * np.cos(angle)).astype(np.int32), 0, height)
            if draw_method == DrawMethod.LINE:
                cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_w)
            elif draw_method == DrawMethod.CIRCLE:
                cv2.circle(mask, (start_x, start_y), radius=brush_w, color=1., thickness=-1)
            elif draw_method == DrawMethod.SQUARE:
                radius = brush_w // 2
                mask[start_y - radius:start_y + radius, start_x - radius:start_x + radius] = 1
            # chain the next segment from this segment's endpoint
            start_x, start_y = end_x, end_y
    return mask[None, ...]
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class RandomIrregularMaskGenerator:
    """Callable wrapper around make_random_irregular_mask.

    When ramp_kwargs is given, a LinearRamp scales max_len, max_width and
    max_times with the training iteration so masks grow harder over time.
    """

    def __init__(self, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10, ramp_kwargs=None,
                 draw_method=DrawMethod.LINE):
        self.max_angle = max_angle
        self.max_len = max_len
        self.max_width = max_width
        self.min_times = min_times
        self.max_times = max_times
        self.draw_method = draw_method
        self.ramp = None if ramp_kwargs is None else LinearRamp(**ramp_kwargs)

    def __call__(self, img, iter_i=None, raw_image=None):
        # ramp coefficient defaults to 1 (full difficulty) when no ramp/iteration
        if self.ramp is not None and iter_i is not None:
            coef = self.ramp(iter_i)
        else:
            coef = 1
        ramped_len = int(max(1, self.max_len * coef))
        ramped_width = int(max(1, self.max_width * coef))
        ramped_times = int(self.min_times + 1 + (self.max_times - self.min_times) * coef)
        return make_random_irregular_mask(img.shape[1:],
                                          max_angle=self.max_angle,
                                          max_len=ramped_len,
                                          max_width=ramped_width,
                                          min_times=self.min_times,
                                          max_times=ramped_times,
                                          draw_method=self.draw_method)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def make_random_rectangle_mask(shape, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3):
    """Generate a mask made of several random axis-aligned rectangles.

    :param shape: (height, width) of the mask
    :param margin: minimum distance between any rectangle and the image border
    :param bbox_min_size: minimum box side length
    :param bbox_max_size: maximum box side length (also capped by image size)
    :param min_times: minimum number of boxes
    :param max_times: maximum number of boxes (inclusive)
    :return: float32 array of shape (1, height, width) with values in {0, 1}
    """
    height, width = shape
    canvas = np.zeros((height, width), np.float32)
    # never allow a box (plus margins) to exceed the image extent
    bbox_max_size = min(bbox_max_size, height - margin * 2, width - margin * 2)
    n_boxes = np.random.randint(min_times, max_times + 1)
    for _ in range(n_boxes):
        box_w = np.random.randint(bbox_min_size, bbox_max_size)
        box_h = np.random.randint(bbox_min_size, bbox_max_size)
        left = np.random.randint(margin, width - margin - box_w + 1)
        top = np.random.randint(margin, height - margin - box_h + 1)
        canvas[top:top + box_h, left:left + box_w] = 1
    return canvas[None, ...]
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class RandomRectangleMaskGenerator:
    """Callable wrapper around make_random_rectangle_mask.

    When ramp_kwargs is given, a LinearRamp scales the maximum box size and
    count with the training iteration.
    """

    def __init__(self, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3, ramp_kwargs=None):
        self.margin = margin
        self.bbox_min_size = bbox_min_size
        self.bbox_max_size = bbox_max_size
        self.min_times = min_times
        self.max_times = max_times
        self.ramp = None if ramp_kwargs is None else LinearRamp(**ramp_kwargs)

    def __call__(self, img, iter_i=None, raw_image=None):
        if self.ramp is not None and iter_i is not None:
            coef = self.ramp(iter_i)
        else:
            coef = 1
        ramped_max_size = int(self.bbox_min_size + 1 + (self.bbox_max_size - self.bbox_min_size) * coef)
        ramped_max_times = int(self.min_times + (self.max_times - self.min_times) * coef)
        return make_random_rectangle_mask(img.shape[1:],
                                          margin=self.margin,
                                          bbox_min_size=self.bbox_min_size,
                                          bbox_max_size=ramped_max_size,
                                          min_times=self.min_times,
                                          max_times=ramped_max_times)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class RandomSegmentationMaskGenerator:
    """Mask generator backed by a semantic segmentation model.

    NOTE(review): `SegmentationMask` is not defined in this module — its import
    is commented out at the top of the file — so calling this raises NameError
    unless that import is restored.
    """

    def __init__(self, **kwargs):
        self.impl = None  # will be instantiated in first call (effectively in subprocess)
        self.kwargs = kwargs

    def __call__(self, img, iter_i=None, raw_image=None):
        if self.impl is None:
            self.impl = SegmentationMask(**self.kwargs)

        # img appears to be CHW; the segmentation model receives HWC
        masks = self.impl.get_masks(np.transpose(img, (1, 2, 0)))
        # drop degenerate masks that contain a single value
        masks = [m for m in masks if len(np.unique(m)) > 1]
        # NOTE(review): np.random.choice on a list of 2-D arrays raises
        # "a must be 1-dimensional"; indexing by a random position would be
        # required here — confirm intent before relying on this path.
        return np.random.choice(masks)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def make_random_superres_mask(shape, min_step=2, max_step=4, min_width=1, max_width=3):
    """Generate a regular grid mask (super-resolution style).

    Masks every `step`-th group of `width` rows and columns, with a random
    phase offset per axis.

    :param shape: (height, width) of the mask
    :return: float32 array of shape (1, height, width) with values in {0, 1}
    """
    height, width = shape
    grid = np.zeros((height, width), np.float32)

    # column pattern parameters (drawn first to keep RNG order stable)
    col_step = np.random.randint(min_step, max_step + 1)
    col_width = np.random.randint(min_width, min(col_step, max_width + 1))
    col_offset = np.random.randint(0, col_step)

    # row pattern parameters
    row_step = np.random.randint(min_step, max_step + 1)
    row_width = np.random.randint(min_width, min(row_step, max_width + 1))
    row_offset = np.random.randint(0, row_step)

    for d in range(row_width):
        grid[row_offset + d::row_step] = 1
    for d in range(col_width):
        grid[:, col_offset + d::col_step] = 1
    return grid[None, ...]
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class RandomSuperresMaskGenerator:
    """Callable wrapper forwarding stored kwargs to make_random_superres_mask."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __call__(self, img, iter_i=None):
        height_width = img.shape[1:]
        return make_random_superres_mask(height_width, **self.kwargs)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
class DumbAreaMaskGenerator:
    """Single-rectangle mask generator.

    In training mode each axis extent is drawn so the covered area ratio lies
    roughly in [min_ratio, max_ratio]; in evaluation mode a centred square of
    area ``default_ratio`` is produced deterministically.
    """
    min_ratio = 0.1       # lower bound on mask side / image side (squared ~ area)
    max_ratio = 0.35      # upper bound on mask side / image side
    default_ratio = 0.225 # fixed area ratio for the deterministic eval mask

    def __init__(self, is_training):
        # Parameters:
        #   is_training (bool): if True - random rectangular mask,
        #                       if False - central square mask
        self.is_training = is_training

    def _random_vector(self, dimension):
        """Return (start, end) of the mask extent along an axis of length `dimension`."""
        if self.is_training:
            lower_limit = math.sqrt(self.min_ratio)
            upper_limit = math.sqrt(self.max_ratio)
            mask_side = round((random.random() * (upper_limit - lower_limit) + lower_limit) * dimension)
            u = random.randint(0, dimension - mask_side - 1)
            v = u + mask_side
        else:
            margin = (math.sqrt(self.default_ratio) / 2) * dimension
            u = round(dimension / 2 - margin)
            v = round(dimension / 2 + margin)
        return u, v

    def __call__(self, img, iter_i=None, raw_image=None):
        """Generate a (1, H, W) float32 mask for a CHW image."""
        c, height, width = img.shape
        mask = np.zeros((height, width), np.float32)
        x1, x2 = self._random_vector(width)
        y1, y2 = self._random_vector(height)
        # BUGFIX: rows are indexed by y (height) and columns by x (width).
        # The original wrote mask[x1:x2, y1:y2], which transposed the rectangle
        # on non-square images and silently clipped when width > height.
        mask[y1:y2, x1:x2] = 1
        return mask[None, ...]
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
class OutpaintingMaskGenerator:
    """Masks one or more image borders (outpainting-style padding regions)."""

    def __init__(self, min_padding_percent:float=0.04, max_padding_percent:float=0.25, left_padding_prob:float=0.5, top_padding_prob:float=0.5,
                 right_padding_prob:float=0.5, bottom_padding_prob:float=0.5, is_fixed_randomness:bool=False):
        """
        is_fixed_randomness - get identical paddings for the same image if args are the same
        """
        # NOTE: max_padding_percent was annotated `int` upstream; it is a fraction in [0,1].
        self.min_padding_percent = min_padding_percent
        self.max_padding_percent = max_padding_percent
        # order matters: left, top, right, bottom — matches `coords` in __call__
        self.probs = [left_padding_prob, top_padding_prob, right_padding_prob, bottom_padding_prob]
        self.is_fixed_randomness = is_fixed_randomness

        assert self.min_padding_percent <= self.max_padding_percent
        assert self.max_padding_percent > 0
        assert len([x for x in [self.min_padding_percent, self.max_padding_percent] if (x>=0 and x<=1)]) == 2, f"Padding percentage should be in [0,1]"
        assert sum(self.probs) > 0, f"At least one of the padding probs should be greater than 0 - {self.probs}"
        assert len([x for x in self.probs if (x >= 0) and (x <= 1)]) == 4, f"At least one of padding probs is not in [0,1] - {self.probs}"
        if len([x for x in self.probs if x > 0]) == 1:
            LOGGER.warning(f"Only one padding prob is greater than zero - {self.probs}. That means that the outpainting masks will be always on the same side")

    def apply_padding(self, mask, coord):
        """Set to 1 the rectangle given by relative ((y0,x0),(y1,x1)) coords."""
        mask[int(coord[0][0]*self.img_h):int(coord[1][0]*self.img_h),
             int(coord[0][1]*self.img_w):int(coord[1][1]*self.img_w)] = 1
        return mask

    def get_padding(self, size):
        """Draw a padding extent as a fraction of `size`."""
        n1 = int(self.min_padding_percent*size)
        n2 = int(self.max_padding_percent*size)
        return self.rnd.randint(n1, n2) / size

    @staticmethod
    def _img2rs(img):
        """Derive a deterministic 32-bit RNG seed from image content."""
        arr = np.ascontiguousarray(img.astype(np.uint8))
        str_hash = hashlib.sha1(arr).hexdigest()
        # BUGFIX: previously `hash(str_hash)` was used, but str hashing is
        # salted per process (PYTHONHASHSEED), so "fixed" randomness was not
        # reproducible across runs. Parse the hex digest instead.
        res = int(str_hash, 16) % (2**32)
        return res

    def __call__(self, img, iter_i=None, raw_image=None):
        """Generate a (1, H, W) float32 border mask for a CHW image."""
        c, self.img_h, self.img_w = img.shape
        mask = np.zeros((self.img_h, self.img_w), np.float32)
        at_least_one_mask_applied = False

        if self.is_fixed_randomness:
            assert raw_image is not None, f"Cant calculate hash on raw_image=None"
            rs = self._img2rs(raw_image)
            self.rnd = np.random.RandomState(rs)
        else:
            self.rnd = np.random

        # relative ((y0,x0),(y1,x1)) rectangles: left, top, right, bottom strips
        coords = [[
                   (0,0),
                   (1,self.get_padding(size=self.img_h))
                  ],
                  [
                   (0,0),
                   (self.get_padding(size=self.img_w),1)
                  ],
                  [
                   (0,1-self.get_padding(size=self.img_h)),
                   (1,1)
                  ],
                  [
                   (1-self.get_padding(size=self.img_w),0),
                   (1,1)
                  ]]

        for pp, coord in zip(self.probs, coords):
            if self.rnd.random() < pp:
                at_least_one_mask_applied = True
                mask = self.apply_padding(mask=mask, coord=coord)

        if not at_least_one_mask_applied:
            # guarantee a non-empty mask: force one side, weighted by probs
            idx = self.rnd.choice(range(len(coords)), p=np.array(self.probs)/sum(self.probs))
            mask = self.apply_padding(mask=mask, coord=coords[idx])
        return mask[None, ...]
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
class MixedMaskGenerator:
    """Samples one of several mask generators per call, with optional inversion.

    Generators with zero probability are skipped; the remaining probabilities
    are normalized. Registration order (irregular, box, segm, squares,
    superres, outpainting) is preserved.
    """

    def __init__(self, irregular_proba=1/3, irregular_kwargs=None,
                 box_proba=1/3, box_kwargs=None,
                 segm_proba=1/3, segm_kwargs=None,
                 squares_proba=0, squares_kwargs=None,
                 superres_proba=0, superres_kwargs=None,
                 outpainting_proba=0, outpainting_kwargs=None,
                 invert_proba=0):
        self.probas = []
        self.gens = []

        def register(proba, factory, kwargs, forced_draw_method=None):
            # append one generator when its probability is positive;
            # copy kwargs before mutation so the caller's dict stays untouched
            if proba <= 0:
                return
            kwargs = {} if kwargs is None else dict(kwargs)
            if forced_draw_method is not None:
                kwargs['draw_method'] = forced_draw_method
            self.probas.append(proba)
            self.gens.append(factory(**kwargs))

        register(irregular_proba, RandomIrregularMaskGenerator, irregular_kwargs, DrawMethod.LINE)
        register(box_proba, RandomRectangleMaskGenerator, box_kwargs)
        register(segm_proba, RandomSegmentationMaskGenerator, segm_kwargs)
        register(squares_proba, RandomIrregularMaskGenerator, squares_kwargs, DrawMethod.SQUARE)
        register(superres_proba, RandomSuperresMaskGenerator, superres_kwargs)
        register(outpainting_proba, OutpaintingMaskGenerator, outpainting_kwargs)

        self.probas = np.array(self.probas, dtype='float32')
        self.probas /= self.probas.sum()
        self.invert_proba = invert_proba

    def __call__(self, img, iter_i=None, raw_image=None):
        idx = np.random.choice(len(self.probas), p=self.probas)
        chosen = self.gens[idx]
        mask = chosen(img, iter_i=iter_i, raw_image=raw_image)
        # short-circuit keeps RNG untouched when inversion is disabled
        should_invert = self.invert_proba > 0 and random.random() < self.invert_proba
        return 1 - mask if should_invert else mask
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def get_mask_generator(kind, kwargs):
    """Factory for mask generators.

    :param kind: "mixed" (default when None), "outpainting" or "dumb"
    :param kwargs: constructor keyword arguments (None -> {})
    :raises NotImplementedError: for an unknown kind
    """
    kind = "mixed" if kind is None else kind
    kwargs = {} if kwargs is None else kwargs

    if kind == "mixed":
        return MixedMaskGenerator(**kwargs)
    if kind == "outpainting":
        return OutpaintingMaskGenerator(**kwargs)
    if kind == "dumb":
        return DumbAreaMaskGenerator(**kwargs)
    raise NotImplementedError(f"No such generator kind = {kind}")
|
RAVE-main/annotator/lama/saicinpainting/training/losses/__init__.py
ADDED
|
File without changes
|
RAVE-main/annotator/lama/saicinpainting/training/modules/base.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
from typing import Tuple, List
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
|
| 7 |
+
from annotator.lama.saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv
|
| 8 |
+
from annotator.lama.saicinpainting.training.modules.multidilated_conv import MultidilatedConv
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class BaseDiscriminator(nn.Module):
    """Abstract base class for discriminators used in adversarial training."""

    @abc.abstractmethod
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """
        Predict scores and get intermediate activations. Useful for feature matching loss
        :return tuple (scores, list of intermediate activations)
        """
        # BUGFIX: was `raise NotImplemented()` — NotImplemented is a constant,
        # not an exception type, so invoking it raised a confusing
        # "'NotImplementedType' object is not callable" TypeError.
        raise NotImplementedError()
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def get_conv_block_ctor(kind='default'):
    """Resolve a convolution-block constructor from its config name.

    A non-string `kind` is assumed to already be a constructor and is
    returned unchanged.
    """
    if not isinstance(kind, str):
        return kind
    if kind == 'default':
        ctor = nn.Conv2d
    elif kind == 'depthwise':
        ctor = DepthWiseSeperableConv
    elif kind == 'multidilated':
        ctor = MultidilatedConv
    else:
        raise ValueError(f'Unknown convolutional block kind {kind}')
    return ctor
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def get_norm_layer(kind='bn'):
    """Resolve a normalization-layer constructor from its config name.

    A non-string `kind` is assumed to already be a constructor and is
    returned unchanged.
    """
    if not isinstance(kind, str):
        return kind
    known = {'bn': nn.BatchNorm2d, 'in': nn.InstanceNorm2d}
    if kind in known:
        return known[kind]
    raise ValueError(f'Unknown norm block kind {kind}')
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def get_activation(kind='tanh'):
    """Return a freshly constructed output activation module.

    kind=False (identity check, not just falsy) disables the activation.
    """
    if kind == 'tanh':
        act = nn.Tanh
    elif kind == 'sigmoid':
        act = nn.Sigmoid
    elif kind is False:
        act = nn.Identity
    else:
        raise ValueError(f'Unknown activation kind {kind}')
    return act()
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class SimpleMultiStepGenerator(nn.Module):
    """Chains generator stages, feeding each stage the channel-concatenation
    of the original input and all previous outputs; returns the outputs
    concatenated in reverse order along the channel dimension."""

    def __init__(self, steps: List[nn.Module]):
        super().__init__()
        self.steps = nn.ModuleList(steps)

    def forward(self, x):
        outputs = []
        feed = x
        for stage in self.steps:
            produced = stage(feed)
            outputs.append(produced)
            # accumulate: the next stage also sees everything produced so far
            feed = torch.cat((feed, produced), dim=1)
        return torch.cat(list(reversed(outputs)), dim=1)
|
| 66 |
+
|
| 67 |
+
def deconv_factory(kind, ngf, mult, norm_layer, activation, max_features):
    """Build one 2x upsampling stage as a list [upsample/conv, norm, activation].

    Channel counts are ngf*mult -> ngf*mult/2, both capped at max_features.
    :raises Exception: for an unknown `kind`
    """
    in_ch = min(max_features, ngf * mult)
    out_ch = min(max_features, int(ngf * mult / 2))
    if kind == 'convtranspose':
        upsample = nn.ConvTranspose2d(in_ch, out_ch, kernel_size=3, stride=2,
                                      padding=1, output_padding=1)
        return [upsample, norm_layer(out_ch), activation]
    if kind == 'bilinear':
        return [nn.Upsample(scale_factor=2, mode='bilinear'),
                DepthWiseSeperableConv(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
                norm_layer(out_ch),
                activation]
    raise Exception(f"Invalid deconv kind: {kind}")
|
RAVE-main/annotator/lama/saicinpainting/training/modules/depthwise_sep_conv.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class DepthWiseSeperableConv(nn.Module):
    """Depthwise-separable convolution: a per-channel spatial convolution
    followed by a 1x1 pointwise convolution mixing channels."""

    def __init__(self, in_dim, out_dim, *args, **kwargs):
        super().__init__()
        # a caller-supplied `groups` would conflict with the depthwise grouping
        kwargs.pop('groups', None)

        self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs)
        self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))
|
RAVE-main/annotator/lama/saicinpainting/training/modules/fake_fakes.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from kornia import SamplePadding
|
| 3 |
+
from kornia.augmentation import RandomAffine, CenterCrop
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class FakeFakesGenerator:
    """Produces "fake fakes": blends of each image with another image (or an
    augmented copy of itself) under a smooth gradient mask, for discriminator
    regularization.

    NOTE(review): depends on kornia's RandomAffine/CenterCrop/SamplePadding;
    `from kornia import SamplePadding` at the top of this file may need to be
    `from kornia.constants import SamplePadding` on recent kornia versions —
    verify against the pinned dependency.
    """

    def __init__(self, aug_proba=0.5, img_aug_degree=30, img_aug_translate=0.2):
        # strong affine used to randomize the blend-gradient orientation
        # (degrees/translate are fixed here, not taken from the img_aug_* args)
        self.grad_aug = RandomAffine(degrees=360,
                                     translate=0.2,
                                     padding_mode=SamplePadding.REFLECTION,
                                     keepdim=False,
                                     p=1)
        # milder affine applied to images when building the blend target
        self.img_aug = RandomAffine(degrees=img_aug_degree,
                                    translate=img_aug_translate,
                                    padding_mode=SamplePadding.REFLECTION,
                                    keepdim=True,
                                    p=1)
        self.aug_proba = aug_proba

    def __call__(self, input_images, masks):
        """Blend each image with a target under a gradient restricted to `masks`.

        :return: (blended images, blend masks) — both same spatial shape as inputs
        """
        blend_masks = self._fill_masks_with_gradient(masks)
        blend_target = self._make_blend_target(input_images)
        result = input_images * (1 - blend_masks) + blend_target * blend_masks
        return result, blend_masks

    def _make_blend_target(self, input_images):
        """Per-sample: with prob aug_proba use an augmented copy of the image,
        otherwise a randomly permuted other image from the batch."""
        batch_size = input_images.shape[0]
        permuted = input_images[torch.randperm(batch_size)]
        augmented = self.img_aug(input_images)
        is_aug = (torch.rand(batch_size, device=input_images.device)[:, None, None, None] < self.aug_proba).float()
        result = augmented * is_aug + permuted * (1 - is_aug)
        return result

    def _fill_masks_with_gradient(self, masks):
        """Build a randomly oriented linear 0..1 gradient, restricted to the
        masked region and renormalized to span [0, 1] within it."""
        batch_size, _, height, width = masks.shape
        # oversized gradient so the random affine + center crop keeps coverage
        grad = torch.linspace(0, 1, steps=width * 2, device=masks.device, dtype=masks.dtype) \
            .view(1, 1, 1, -1).expand(batch_size, 1, height * 2, width * 2)
        grad = self.grad_aug(grad)
        grad = CenterCrop((height, width))(grad)
        grad *= masks

        # +10 outside the mask so the per-sample min is taken inside the mask only
        grad_for_min = grad + (1 - masks) * 10
        grad -= grad_for_min.view(batch_size, -1).min(-1).values[:, None, None, None]
        grad /= grad.view(batch_size, -1).max(-1).values[:, None, None, None] + 1e-6
        grad.clamp_(min=0, max=1)

        return grad
|
RAVE-main/annotator/lama/saicinpainting/training/modules/ffc.py
ADDED
|
@@ -0,0 +1,485 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Fast Fourier Convolution NeurIPS 2020
|
| 2 |
+
# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py
|
| 3 |
+
# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
import torch.nn.functional as F
|
| 9 |
+
|
| 10 |
+
from annotator.lama.saicinpainting.training.modules.base import get_activation, BaseDiscriminator
|
| 11 |
+
from annotator.lama.saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper
|
| 12 |
+
from annotator.lama.saicinpainting.training.modules.squeeze_excitation import SELayer
|
| 13 |
+
from annotator.lama.saicinpainting.utils import get_shape
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class FFCSE_block(nn.Module):
    """Squeeze-and-excitation block for (local, global) FFC branch pairs.

    Pools the concatenated branches, squeezes channels through a bottleneck
    (reduction ratio 16), then scales each branch by its own sigmoid
    channel-attention map. A branch with zero channels has no attention head.
    """

    def __init__(self, channels, ratio_g):
        super(FFCSE_block, self).__init__()
        reduction = 16  # SE bottleneck reduction ratio
        num_global = int(channels * ratio_g)
        num_local = channels - num_global

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.conv1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, bias=True)
        self.relu1 = nn.ReLU(inplace=True)
        # Per-branch excitation heads; omitted entirely for an empty branch.
        self.conv_a2l = nn.Conv2d(channels // reduction, num_local,
                                  kernel_size=1, bias=True) if num_local != 0 else None
        self.conv_a2g = nn.Conv2d(channels // reduction, num_global,
                                  kernel_size=1, bias=True) if num_global != 0 else None
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        if type(x) is not tuple:
            x = (x, 0)
        identity_local, identity_global = x

        # The global part may be the integer placeholder 0 instead of a tensor.
        squeezed = identity_local if type(identity_global) is int \
            else torch.cat([identity_local, identity_global], dim=1)
        squeezed = self.avgpool(squeezed)
        squeezed = self.relu1(self.conv1(squeezed))

        x_l = 0 if self.conv_a2l is None \
            else identity_local * self.sigmoid(self.conv_a2l(squeezed))
        x_g = 0 if self.conv_a2g is None \
            else identity_global * self.sigmoid(self.conv_a2g(squeezed))
        return x_l, x_g
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class FourierUnit(nn.Module):
    """Spectral convolution with a global receptive field.

    The input is transformed with a real FFT, the real/imaginary parts are
    stacked into channels and mixed by a 1x1 conv + BN + ReLU, and the result
    is transformed back with an inverse real FFT of the original size.

    Fix vs. original: removed the dead local ``r_size`` (assigned, never read).

    Args:
        in_channels, out_channels: spatial-domain channel counts.
        groups: group count of the 1x1 spectral conv.
        spatial_scale_factor: if set, resize input by this factor before the
            FFT and resize the output back afterwards.
        spatial_scale_mode: interpolation mode for that resizing.
        spectral_pos_encoding: concatenate normalized row/col coordinate maps
            to the spectral features before the conv.
        use_se: apply an SELayer to the spectral features before the conv.
        se_kwargs: extra keyword args for the SELayer.
        ffc3d: FFT over the last three dims instead of the last two.
        fft_norm: normalization mode passed to torch.fft.
    """

    def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear',
                 spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'):
        # bn_layer not used
        super(FourierUnit, self).__init__()
        self.groups = groups

        # real+imag are folded into channels, hence the factor of 2.
        self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0),
                                          out_channels=out_channels * 2,
                                          kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False)
        self.bn = torch.nn.BatchNorm2d(out_channels * 2)
        self.relu = torch.nn.ReLU(inplace=True)

        # squeeze and excitation block
        self.use_se = use_se
        if use_se:
            if se_kwargs is None:
                se_kwargs = {}
            self.se = SELayer(self.conv_layer.in_channels, **se_kwargs)

        self.spatial_scale_factor = spatial_scale_factor
        self.spatial_scale_mode = spatial_scale_mode
        self.spectral_pos_encoding = spectral_pos_encoding
        self.ffc3d = ffc3d
        self.fft_norm = fft_norm

    def forward(self, x):
        batch = x.shape[0]

        if self.spatial_scale_factor is not None:
            orig_size = x.shape[-2:]
            x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode,
                              align_corners=False)

        # Forward real FFT; complex result of shape (batch, c, h, w/2+1).
        fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1)
        ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm)
        # Stack real/imag as a trailing dim, then fold it into the channel dim.
        ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
        ffted = ffted.permute(0, 1, 4, 2, 3).contiguous()  # (batch, c, 2, h, w/2+1)
        ffted = ffted.view((batch, -1,) + ffted.size()[3:])

        if self.spectral_pos_encoding:
            height, width = ffted.shape[-2:]
            coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted)
            coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted)
            ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1)

        if self.use_se:
            ffted = self.se(ffted)

        ffted = self.conv_layer(ffted)  # (batch, c*2, h, w/2+1)
        ffted = self.relu(self.bn(ffted))

        # Unfold channels back into (real, imag) and rebuild the complex tensor.
        ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute(
            0, 1, 3, 4, 2).contiguous()  # (batch, c, h, w/2+1, 2)
        ffted = torch.complex(ffted[..., 0], ffted[..., 1])

        ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:]
        output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm)

        if self.spatial_scale_factor is not None:
            output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False)

        return output
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class SeparableFourierUnit(nn.Module):
    """FourierUnit variant with separate 1D spectral branches for rows and columns.

    Each branch: real FFT along the last dim -> (kernel_size x 1) conv + BN +
    ReLU over the stacked real/imag channels -> inverse real FFT. The column
    branch runs on the transposed input; branch outputs are concatenated along
    channels (row branch gets out_channels // 2, column branch the remainder).

    Fixes vs. original:
      * ``torch.fft.irfft`` has no ``s`` keyword (only ``irfftn`` does), so the
        original ``irfft(ffted, s=x.shape[-1:])`` raised TypeError; replaced
        with the equivalent ``n=x.shape[-1]``.
      * Removed the dead local ``r_size``.
    """

    def __init__(self, in_channels, out_channels, groups=1, kernel_size=3):
        # bn_layer not used
        super(SeparableFourierUnit, self).__init__()
        self.groups = groups
        row_out_channels = out_channels // 2
        col_out_channels = out_channels - row_out_channels
        self.row_conv = torch.nn.Conv2d(in_channels=in_channels * 2,
                                        out_channels=row_out_channels * 2,
                                        kernel_size=(kernel_size, 1),  # data is transposed for the column branch
                                        stride=1, padding=(kernel_size // 2, 0),
                                        padding_mode='reflect',
                                        groups=self.groups, bias=False)
        self.col_conv = torch.nn.Conv2d(in_channels=in_channels * 2,
                                        out_channels=col_out_channels * 2,
                                        kernel_size=(kernel_size, 1),  # data is transposed for the column branch
                                        stride=1, padding=(kernel_size // 2, 0),
                                        padding_mode='reflect',
                                        groups=self.groups, bias=False)
        self.row_bn = torch.nn.BatchNorm2d(row_out_channels * 2)
        self.col_bn = torch.nn.BatchNorm2d(col_out_channels * 2)
        self.relu = torch.nn.ReLU(inplace=True)

    def process_branch(self, x, conv, bn):
        """Run one spectral branch (FFT along the last dim of ``x``)."""
        batch = x.shape[0]

        # Real FFT along the last dim; stack real/imag and fold into channels.
        ffted = torch.fft.rfft(x, norm="ortho")
        ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
        ffted = ffted.permute(0, 1, 4, 2, 3).contiguous()  # (batch, c, 2, h, w/2+1)
        ffted = ffted.view((batch, -1,) + ffted.size()[3:])

        ffted = self.relu(bn(conv(ffted)))

        # Unfold channels back into (real, imag) and rebuild the complex tensor.
        ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute(
            0, 1, 3, 4, 2).contiguous()  # (batch, c, h, w/2+1, 2)
        ffted = torch.complex(ffted[..., 0], ffted[..., 1])

        # 1D inverse FFT back to the original length of the last dim.
        output = torch.fft.irfft(ffted, n=x.shape[-1], norm="ortho")
        return output

    def forward(self, x):
        rowwise = self.process_branch(x, self.row_conv, self.row_bn)
        colwise = self.process_branch(x.permute(0, 1, 3, 2), self.col_conv, self.col_bn).permute(0, 1, 3, 2)
        out = torch.cat((rowwise, colwise), dim=1)
        return out
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class SpectralTransform(nn.Module):
    """Global branch of the FFC: 1x1 reduction, Fourier unit(s), 1x1 expansion.

    When ``enable_lfu`` is set, a local Fourier unit additionally processes a
    quarter of the channels over 2x2 spatial sub-blocks and its tiled output is
    added in before the final 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, separable_fu=False, **fu_kwargs):
        # bn_layer not used
        super(SpectralTransform, self).__init__()
        self.enable_lfu = enable_lfu
        self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) if stride == 2 else nn.Identity()

        self.stride = stride
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels // 2, kernel_size=1, groups=groups, bias=False),
            nn.BatchNorm2d(out_channels // 2),
            nn.ReLU(inplace=True)
        )
        fu_class = SeparableFourierUnit if separable_fu else FourierUnit
        self.fu = fu_class(out_channels // 2, out_channels // 2, groups, **fu_kwargs)
        if self.enable_lfu:
            self.lfu = fu_class(out_channels // 2, out_channels // 2, groups)
        self.conv2 = torch.nn.Conv2d(out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False)

    def forward(self, x):
        x = self.downsample(x)
        x = self.conv1(x)
        spectral_out = self.fu(x)

        if self.enable_lfu:
            _, c, h, _ = x.shape
            split_no = 2
            split_s = h // split_no
            # Fold 2x2 spatial sub-blocks of the first quarter of channels into
            # the channel dim, run the local FU, and tile the result back.
            # NOTE(review): the width split also uses split_s derived from the
            # height — this presumes square feature maps; confirm with callers.
            patches = torch.cat(torch.split(x[:, :c // 4], split_s, dim=-2), dim=1).contiguous()
            patches = torch.cat(torch.split(patches, split_s, dim=-1), dim=1).contiguous()
            patches = self.lfu(patches)
            lfu_out = patches.repeat(1, 1, split_no, split_no).contiguous()
        else:
            lfu_out = 0

        # Residual combination of the input, global and local spectral paths.
        return self.conv2(x + spectral_out + lfu_out)
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
class FFC(nn.Module):
    """Fast Fourier Convolution layer.

    Channels are split into a local part (regular convs) and a global part
    (SpectralTransform); all four cross connections l2l, l2g, g2l, g2g are
    computed and summed per output branch. Empty branches are represented by
    nn.Identity. Optional gating modulates the two cross terms.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 ratio_gin, ratio_gout, stride=1, padding=0,
                 dilation=1, groups=1, bias=False, enable_lfu=True,
                 padding_type='reflect', gated=False, **spectral_kwargs):
        super(FFC, self).__init__()

        assert stride == 1 or stride == 2, "Stride should be 1 or 2."
        self.stride = stride

        in_cg = int(in_channels * ratio_gin)
        in_cl = in_channels - in_cg
        out_cg = int(out_channels * ratio_gout)
        out_cl = out_channels - out_cg

        self.ratio_gin = ratio_gin
        self.ratio_gout = ratio_gout
        self.global_in_num = in_cg

        def _spatial_conv(cin, cout):
            # Regular conv for a connection, or Identity when either end is empty.
            if cin == 0 or cout == 0:
                return nn.Identity()
            return nn.Conv2d(cin, cout, kernel_size, stride, padding, dilation,
                             groups, bias, padding_mode=padding_type)

        self.convl2l = _spatial_conv(in_cl, out_cl)
        self.convl2g = _spatial_conv(in_cl, out_cg)
        self.convg2l = _spatial_conv(in_cg, out_cl)
        if in_cg == 0 or out_cg == 0:
            self.convg2g = nn.Identity()
        else:
            self.convg2g = SpectralTransform(in_cg, out_cg, stride,
                                             1 if groups == 1 else groups // 2,
                                             enable_lfu, **spectral_kwargs)

        self.gated = gated
        if in_cg == 0 or out_cl == 0 or not self.gated:
            self.gate = nn.Identity()
        else:
            self.gate = nn.Conv2d(in_channels, 2, 1)

    def forward(self, x):
        x_l, x_g = x if type(x) is tuple else (x, 0)
        out_xl, out_xg = 0, 0

        if self.gated:
            gate_input_parts = [x_l]
            if torch.is_tensor(x_g):
                gate_input_parts.append(x_g)
            gate_input = torch.cat(gate_input_parts, dim=1)
            gates = torch.sigmoid(self.gate(gate_input))
            g2l_gate, l2g_gate = gates.chunk(2, dim=1)
        else:
            g2l_gate, l2g_gate = 1, 1

        if self.ratio_gout != 1:
            out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate
        if self.ratio_gout != 0:
            out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g)

        return out_xl, out_xg
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
class FFC_BN_ACT(nn.Module):
    """FFC followed by per-branch normalization and activation.

    A fully-global output (ratio_gout == 1) gets Identity for the local
    norm/activation — and vice versa — so the integer branch placeholder
    passes through unchanged.
    """

    def __init__(self, in_channels, out_channels,
                 kernel_size, ratio_gin, ratio_gout,
                 stride=1, padding=0, dilation=1, groups=1, bias=False,
                 norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity,
                 padding_type='reflect',
                 enable_lfu=True, **kwargs):
        super(FFC_BN_ACT, self).__init__()
        self.ffc = FFC(in_channels, out_channels, kernel_size,
                       ratio_gin, ratio_gout, stride, padding, dilation,
                       groups, bias, enable_lfu, padding_type=padding_type, **kwargs)

        global_channels = int(out_channels * ratio_gout)
        local_channels = out_channels - global_channels
        self.bn_l = (nn.Identity if ratio_gout == 1 else norm_layer)(local_channels)
        self.bn_g = (nn.Identity if ratio_gout == 0 else norm_layer)(global_channels)
        self.act_l = (nn.Identity if ratio_gout == 1 else activation_layer)(inplace=True)
        self.act_g = (nn.Identity if ratio_gout == 0 else activation_layer)(inplace=True)

    def forward(self, x):
        x_l, x_g = self.ffc(x)
        x_l = self.act_l(self.bn_l(x_l))
        x_g = self.act_g(self.bn_g(x_g))
        return x_l, x_g
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
class FFCResnetBlock(nn.Module):
    """Residual block built from two FFC_BN_ACT convolutions.

    With ``inline=True`` the block accepts/returns a single tensor whose last
    ``global_in_num`` channels form the global branch; otherwise it operates on
    (local, global) tuples.
    """

    def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1,
                 spatial_transform_kwargs=None, inline=False, **conv_kwargs):
        super().__init__()

        def _make_conv():
            return FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation,
                              norm_layer=norm_layer,
                              activation_layer=activation_layer,
                              padding_type=padding_type,
                              **conv_kwargs)

        self.conv1 = _make_conv()
        self.conv2 = _make_conv()
        if spatial_transform_kwargs is not None:
            self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs)
            self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs)
        self.inline = inline

    def forward(self, x):
        if self.inline:
            # Split a single tensor into its local / global channel halves.
            n_global = self.conv1.ffc.global_in_num
            x_l, x_g = x[:, :-n_global], x[:, -n_global:]
        else:
            x_l, x_g = x if type(x) is tuple else (x, 0)

        id_l, id_g = x_l, x_g

        x_l, x_g = self.conv1((x_l, x_g))
        x_l, x_g = self.conv2((x_l, x_g))

        # Residual connection applied per branch.
        x_l, x_g = id_l + x_l, id_g + x_g
        out = x_l, x_g
        if self.inline:
            out = torch.cat(out, dim=1)
        return out
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
class ConcatTupleLayer(nn.Module):
    """Collapse an FFC (local, global) pair into one tensor.

    Returns the local part alone when the global part is not a tensor
    (the integer placeholder 0); otherwise concatenates along channels.
    """

    def forward(self, x):
        assert isinstance(x, tuple)
        local_part, global_part = x
        assert torch.is_tensor(local_part) or torch.is_tensor(global_part)
        if not torch.is_tensor(global_part):
            return local_part
        return torch.cat(x, dim=1)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class FFCResNetGenerator(nn.Module):
    """ResNet-style generator built from FFC blocks (LaMa inpainting generator).

    Pipeline: FFC stem -> ``n_downsampling`` strided FFC convs -> ``n_blocks``
    FFC residual blocks -> branch concat -> transposed-conv upsampling ->
    optional inline FFC block -> 7x7 output conv (+ optional output activation,
    tanh when ``add_out_act`` is True).

    Fix vs. original: mutable ``{}`` default arguments replaced with ``None``
    sentinels (shared-mutable-default pitfall); behavior is unchanged for all
    callers.
    """

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', activation_layer=nn.ReLU,
                 up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True),
                 init_conv_kwargs=None, downsample_conv_kwargs=None, resnet_conv_kwargs=None,
                 spatial_transform_layers=None, spatial_transform_kwargs=None,
                 add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs=None):
        assert (n_blocks >= 0)
        super().__init__()
        # Fresh dicts per call instead of shared module-level defaults.
        init_conv_kwargs = {} if init_conv_kwargs is None else init_conv_kwargs
        downsample_conv_kwargs = {} if downsample_conv_kwargs is None else downsample_conv_kwargs
        resnet_conv_kwargs = {} if resnet_conv_kwargs is None else resnet_conv_kwargs
        spatial_transform_kwargs = {} if spatial_transform_kwargs is None else spatial_transform_kwargs
        out_ffc_kwargs = {} if out_ffc_kwargs is None else out_ffc_kwargs

        model = [nn.ReflectionPad2d(3),
                 FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer,
                            activation_layer=activation_layer, **init_conv_kwargs)]

        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i
            if i == n_downsampling - 1:
                # The last downsampling layer must emit the local/global split
                # that the resnet blocks expect at their input.
                cur_conv_kwargs = dict(downsample_conv_kwargs)
                cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0)
            else:
                cur_conv_kwargs = downsample_conv_kwargs
            model += [FFC_BN_ACT(min(max_features, ngf * mult),
                                 min(max_features, ngf * mult * 2),
                                 kernel_size=3, stride=2, padding=1,
                                 norm_layer=norm_layer,
                                 activation_layer=activation_layer,
                                 **cur_conv_kwargs)]

        mult = 2 ** n_downsampling
        feats_num_bottleneck = min(max_features, ngf * mult)

        ### resnet blocks
        for i in range(n_blocks):
            cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type,
                                          activation_layer=activation_layer,
                                          norm_layer=norm_layer, **resnet_conv_kwargs)
            if spatial_transform_layers is not None and i in spatial_transform_layers:
                cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs)
            model += [cur_resblock]

        model += [ConcatTupleLayer()]

        ### upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(min(max_features, ngf * mult),
                                         min(max_features, int(ngf * mult / 2)),
                                         kernel_size=3, stride=2, padding=1, output_padding=1),
                      up_norm_layer(min(max_features, int(ngf * mult / 2))),
                      up_activation]

        if out_ffc:
            model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer,
                                     norm_layer=norm_layer, inline=True, **out_ffc_kwargs)]

        model += [nn.ReflectionPad2d(3),
                  nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation('tanh' if add_out_act is True else add_out_act))
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
class FFCNLayerDiscriminator(BaseDiscriminator):
    """PatchGAN-style discriminator built from FFC_BN_ACT layers.

    Sub-models ``model0`` .. ``model{n_layers+1}`` are stored as attributes so
    that intermediate activations can be returned for feature-matching losses.

    Fix vs. original: mutable ``{}`` default arguments replaced with ``None``
    sentinels (shared-mutable-default pitfall); behavior is unchanged.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512,
                 init_conv_kwargs=None, conv_kwargs=None):
        super().__init__()
        init_conv_kwargs = {} if init_conv_kwargs is None else init_conv_kwargs
        conv_kwargs = {} if conv_kwargs is None else conv_kwargs
        self.n_layers = n_layers

        def _act_ctor(inplace=True):
            return nn.LeakyReLU(negative_slope=0.2, inplace=inplace)

        kw = 3
        padw = int(np.ceil((kw - 1.0) / 2))
        sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer,
                                activation_layer=_act_ctor, **init_conv_kwargs)]]

        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, max_features)

            cur_model = [
                FFC_BN_ACT(nf_prev, nf,
                           kernel_size=kw, stride=2, padding=padw,
                           norm_layer=norm_layer,
                           activation_layer=_act_ctor,
                           **conv_kwargs)
            ]
            sequence.append(cur_model)

        nf_prev = nf
        # NOTE(review): hard-coded 512 here instead of ``max_features`` — kept
        # as-is to preserve behavior and checkpoint compatibility, but it looks
        # like it was meant to be ``max_features``; confirm before changing.
        nf = min(nf * 2, 512)

        cur_model = [
            FFC_BN_ACT(nf_prev, nf,
                       kernel_size=kw, stride=1, padding=padw,
                       norm_layer=norm_layer,
                       activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs),
                       **conv_kwargs),
            ConcatTupleLayer()
        ]
        sequence.append(cur_model)

        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        for n in range(len(sequence)):
            setattr(self, 'model' + str(n), nn.Sequential(*sequence[n]))

    def get_all_activations(self, x):
        """Run every sub-model in turn and return the list of their outputs."""
        res = [x]
        for n in range(self.n_layers + 2):
            model = getattr(self, 'model' + str(n))
            res.append(model(res[-1]))
        return res[1:]

    def forward(self, x):
        act = self.get_all_activations(x)
        feats = []
        for out in act[:-1]:
            if isinstance(out, tuple):
                # Collapse (local, global) pairs for the feature-matching list.
                if torch.is_tensor(out[1]):
                    out = torch.cat(out, dim=1)
                else:
                    out = out[0]
            feats.append(out)
        return act[-1], feats
|
RAVE-main/annotator/lama/saicinpainting/training/modules/multidilated_conv.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import random
|
| 4 |
+
from annotator.lama.saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv
|
| 5 |
+
|
| 6 |
+
class MultidilatedConv(nn.Module):
    """Parallel convolutions with geometrically increasing dilation rates.

    ``comb_mode`` controls how branch inputs/outputs are combined:
      * ``'sum'``:     every branch sees the full input; outputs are summed.
      * ``'cat_out'``: outputs are concatenated (channel-interleaved via
                       ``self.index``).
      * ``'cat_in'``:  the input is split across branches; outputs are summed.
      * ``'cat_both'``: input split and outputs concatenated.

    Fix vs. original: in the ``cat_out`` / ``equal_dim=False`` path the branch
    start offsets were taken as ``[0] + out_dims[:-1]`` instead of a cumulative
    sum, which produced duplicate and missing channel indices for
    ``dilation_num >= 3``; the offsets are now proper prefix sums so
    ``self.index`` is a valid permutation of ``range(out_dim)``.
    """

    def __init__(self, in_dim, out_dim, kernel_size, dilation_num=3, comb_mode='sum', equal_dim=True,
                 shared_weights=False, padding=1, min_dilation=1, shuffle_in_channels=False, use_depthwise=False, **kwargs):
        super().__init__()
        convs = []
        self.equal_dim = equal_dim
        assert comb_mode in ('cat_out', 'sum', 'cat_in', 'cat_both'), comb_mode

        self.cat_out = comb_mode in ('cat_out', 'cat_both')
        if self.cat_out:
            if equal_dim:
                assert out_dim % dilation_num == 0
                chunk = out_dim // dilation_num
                out_dims = [chunk] * dilation_num
                # Interleave branch outputs channel-wise.
                self.index = [i + j * chunk for i in range(chunk) for j in range(dilation_num)]
            else:
                out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
                out_dims.append(out_dim - sum(out_dims))
                # Branch start offsets are cumulative sums of the branch widths.
                starts = [0]
                for width in out_dims[:-1]:
                    starts.append(starts[-1] + width)
                lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)]
                index = []
                for _ in range(out_dims[-1]):
                    for j in range(dilation_num):
                        index += list(range(starts[j], starts[j] + lengths[j]))
                        starts[j] += lengths[j]
                self.index = index
                assert len(index) == out_dim
            self.out_dims = out_dims
        else:
            self.cat_out = False
            self.out_dims = [out_dim] * dilation_num

        if comb_mode in ('cat_in', 'cat_both'):
            if equal_dim:
                assert in_dim % dilation_num == 0
                in_dims = [in_dim // dilation_num] * dilation_num
            else:
                in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)]
                in_dims.append(in_dim - sum(in_dims))
            self.in_dims = in_dims
            self.cat_in = True
        else:
            self.cat_in = False
            self.in_dims = [in_dim] * dilation_num

        conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d
        dilation = min_dilation
        for i in range(dilation_num):
            # Scalar padding grows with the dilation to keep the output size.
            cur_padding = padding * dilation if isinstance(padding, int) else padding[i]
            convs.append(conv_type(
                self.in_dims[i], self.out_dims[i], kernel_size, padding=cur_padding, dilation=dilation, **kwargs
            ))
            if i > 0 and shared_weights:
                convs[-1].weight = convs[0].weight
                convs[-1].bias = convs[0].bias
            dilation *= 2
        self.convs = nn.ModuleList(convs)

        self.shuffle_in_channels = shuffle_in_channels
        if self.shuffle_in_channels:
            # shuffle list as shuffling of tensors is nondeterministic
            in_channels_permute = list(range(in_dim))
            random.shuffle(in_channels_permute)
            # save as buffer so it is saved and loaded with checkpoint
            self.register_buffer('in_channels_permute', torch.tensor(in_channels_permute))

    def forward(self, x):
        if self.shuffle_in_channels:
            x = x[:, self.in_channels_permute]

        outs = []
        if self.cat_in:
            if self.equal_dim:
                x = x.chunk(len(self.convs), dim=1)
            else:
                new_x = []
                start = 0
                for dim in self.in_dims:
                    new_x.append(x[:, start:start + dim])
                    start += dim
                x = new_x
        for i, conv in enumerate(self.convs):
            input = x[i] if self.cat_in else x
            outs.append(conv(input))
        if self.cat_out:
            out = torch.cat(outs, dim=1)[:, self.index]
        else:
            out = sum(outs)
        return out
|
RAVE-main/annotator/lama/saicinpainting/training/modules/multiscale.py
ADDED
|
@@ -0,0 +1,244 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Tuple, Union, Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
|
| 7 |
+
from annotator.lama.saicinpainting.training.modules.base import get_conv_block_ctor, get_activation
|
| 8 |
+
from annotator.lama.saicinpainting.training.modules.pix2pixhd import ResnetBlock
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ResNetHead(nn.Module):
    """Encoder half of a ResNet generator.

    7x7 reflect-padded stem, ``n_downsampling`` strided convolutions (channels
    doubling each step), then ``n_blocks`` residual blocks at the bottleneck
    resolution.
    """

    def __init__(self, input_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default', activation=nn.ReLU(True)):
        assert (n_blocks >= 0)
        super(ResNetHead, self).__init__()

        conv_layer = get_conv_block_ctor(conv_kind)

        layers = [nn.ReflectionPad2d(3),
                  conv_layer(input_nc, ngf, kernel_size=7, padding=0),
                  norm_layer(ngf),
                  activation]

        # Strided downsampling; channel count doubles at every step.
        for i in range(n_downsampling):
            mult = 2 ** i
            layers += [conv_layer(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                       norm_layer(ngf * mult * 2),
                       activation]

        mult = 2 ** n_downsampling

        # Residual blocks at the bottleneck resolution.
        layers += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation,
                               norm_layer=norm_layer, conv_kind=conv_kind)
                   for _ in range(n_blocks)]

        self.model = nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class ResNetTail(nn.Module):
    """Decoder half of a ResNet generator.

    Optional 1x1 input projection, ``n_blocks`` residual blocks,
    ``n_downsampling`` transposed-conv upsampling steps (channels halving each
    step), then an output projection head with optional extra 1x1 layers and
    optional output activation. ``forward`` can also return the
    pre-projection feature map.
    """

    def __init__(self, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),
                 up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0,
                 add_in_proj=None):
        assert (n_blocks >= 0)
        super(ResNetTail, self).__init__()

        mult = 2 ** n_downsampling

        layers = []
        if add_in_proj is not None:
            # Map an arbitrary channel count down to the bottleneck width.
            layers.append(nn.Conv2d(add_in_proj, ngf * mult, kernel_size=1))

        # Residual blocks at the bottleneck resolution.
        layers += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation,
                               norm_layer=norm_layer, conv_kind=conv_kind)
                   for _ in range(n_blocks)]

        # Transposed-conv upsampling; channel count halves at every step.
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            layers += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2,
                                          padding=1, output_padding=1),
                       up_norm_layer(int(ngf * mult / 2)),
                       up_activation]
        self.model = nn.Sequential(*layers)

        head = []
        for _ in range(out_extra_layers_n):
            head += [nn.Conv2d(ngf, ngf, kernel_size=1, padding=0),
                     up_norm_layer(ngf),
                     up_activation]
        head += [nn.ReflectionPad2d(3),
                 nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]

        if add_out_act:
            head.append(get_activation('tanh' if add_out_act is True else add_out_act))

        self.out_proj = nn.Sequential(*head)

    def forward(self, input, return_last_act=False):
        features = self.model(input)
        out = self.out_proj(features)
        if return_last_act:
            return out, features
        return out
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class MultiscaleResNet(nn.Module):
    """Coarse-to-fine multiscale generator: one ResNetHead per scale extracts
    features, and the matching ResNetTail decodes them; each finer tail also
    consumes the (bilinearly upsampled) feature map of the previous, coarser
    tail, concatenated on the channel axis."""

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=2, n_blocks_head=2, n_blocks_tail=6, n_scales=3,
                 norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),
                 up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0,
                 out_cumulative=False, return_only_hr=False):
        super().__init__()

        # One head per scale (stored HR-first); all heads share the same architecture.
        self.heads = nn.ModuleList([ResNetHead(input_nc, ngf=ngf, n_downsampling=n_downsampling,
                                               n_blocks=n_blocks_head, norm_layer=norm_layer, padding_type=padding_type,
                                               conv_kind=conv_kind, activation=activation)
                                    for i in range(n_scales)])
        # When a tail is chained, its input is the head features plus ngf channels
        # coming from the previous (coarser) tail — hence the extra "+ ngf".
        tail_in_feats = ngf * (2 ** n_downsampling) + ngf
        # The last (coarsest) tail receives no previous-tail features, so it gets
        # no input projection (add_in_proj=None).
        self.tails = nn.ModuleList([ResNetTail(output_nc,
                                               ngf=ngf, n_downsampling=n_downsampling,
                                               n_blocks=n_blocks_tail, norm_layer=norm_layer, padding_type=padding_type,
                                               conv_kind=conv_kind, activation=activation, up_norm_layer=up_norm_layer,
                                               up_activation=up_activation, add_out_act=add_out_act,
                                               out_extra_layers_n=out_extra_layers_n,
                                               add_in_proj=None if (i == n_scales - 1) else tail_in_feats)
                                    for i in range(n_scales)])

        # out_cumulative: each finer output is summed with the upsampled coarser result.
        self.out_cumulative = out_cumulative
        # return_only_hr: forward() returns a single tensor instead of a list.
        self.return_only_hr = return_only_hr

    @property
    def num_scales(self):
        # Number of configured scales (heads and tails always match in length).
        return len(self.heads)

    def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int] = None) \
            -> Union[torch.Tensor, List[torch.Tensor]]:
        """
        :param ms_inputs: List of inputs of different resolutions from HR to LR
        :param smallest_scales_num: int or None, number of smallest scales to take at input
        :return: Depending on return_only_hr:
            True: Only the most HR output
            False: List of outputs of different resolutions from HR to LR
        """
        if smallest_scales_num is None:
            assert len(self.heads) == len(ms_inputs), (len(self.heads), len(ms_inputs), smallest_scales_num)
            smallest_scales_num = len(self.heads)
        else:
            assert smallest_scales_num == len(ms_inputs) <= len(self.heads), (len(self.heads), len(ms_inputs), smallest_scales_num)

        # Take the heads that correspond to the provided (smallest) scales.
        cur_heads = self.heads[-smallest_scales_num:]
        ms_features = [cur_head(cur_inp) for cur_head, cur_inp in zip(cur_heads, ms_inputs)]

        all_outputs = []
        prev_tail_features = None
        # Decode coarse-to-fine: index from the LR end of ms_features.
        for i in range(len(ms_features)):
            scale_i = -i - 1

            cur_tail_input = ms_features[-i - 1]
            if prev_tail_features is not None:
                # Resize the coarser tail's features to the current resolution,
                # then concatenate on channels before feeding the tail.
                if prev_tail_features.shape != cur_tail_input.shape:
                    prev_tail_features = F.interpolate(prev_tail_features, size=cur_tail_input.shape[2:],
                                                       mode='bilinear', align_corners=False)
                cur_tail_input = torch.cat((cur_tail_input, prev_tail_features), dim=1)

            cur_out, cur_tail_feats = self.tails[scale_i](cur_tail_input, return_last_act=True)

            prev_tail_features = cur_tail_feats
            all_outputs.append(cur_out)

        if self.out_cumulative:
            # Accumulate: each finer output refines the upsampled running result.
            all_outputs_cum = [all_outputs[0]]
            for i in range(1, len(ms_features)):
                cur_out = all_outputs[i]
                cur_out_cum = cur_out + F.interpolate(all_outputs_cum[-1], size=cur_out.shape[2:],
                                                      mode='bilinear', align_corners=False)
                all_outputs_cum.append(cur_out_cum)
            all_outputs = all_outputs_cum

        if self.return_only_hr:
            return all_outputs[-1]
        else:
            # all_outputs was built LR->HR; reverse so callers get HR->LR.
            return all_outputs[::-1]
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class MultiscaleDiscriminatorSimple(nn.Module):
    """Wraps one discriminator per scale and applies each one to the input of
    the matching resolution."""

    def __init__(self, ms_impl):
        super().__init__()
        # Per-scale discriminators, ordered from HR to LR.
        self.ms_impl = nn.ModuleList(ms_impl)

    @property
    def num_scales(self):
        return len(self.ms_impl)

    def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int] = None) \
            -> List[Tuple[torch.Tensor, List[torch.Tensor]]]:
        """
        :param ms_inputs: List of inputs of different resolutions from HR to LR
        :param smallest_scales_num: int or None, number of smallest scales to take at input
        :return: List of pairs (prediction, features) for different resolutions from HR to LR
        """
        if smallest_scales_num is None:
            assert len(self.ms_impl) == len(ms_inputs), (len(self.ms_impl), len(ms_inputs), smallest_scales_num)
            # Bug fix: this previously read len(self.heads) — an attribute this
            # class never defines (copy-paste from MultiscaleResNet) — raising
            # AttributeError whenever smallest_scales_num was omitted.
            smallest_scales_num = len(self.ms_impl)
        else:
            assert smallest_scales_num == len(ms_inputs) <= len(self.ms_impl), \
                (len(self.ms_impl), len(ms_inputs), smallest_scales_num)

        # Apply the `smallest_scales_num` smallest-scale discriminators pairwise.
        return [cur_discr(cur_input) for cur_discr, cur_input in zip(self.ms_impl[-smallest_scales_num:], ms_inputs)]
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
class SingleToMultiScaleInputMixin:
    """Adapts a multi-scale forward() so it accepts a single tensor: builds a
    pyramid of progressively halved copies (HR first) and forwards the list."""

    def forward(self, x: torch.Tensor) -> List:
        height, width = x.shape[2:]
        pyramid = []
        for level in range(self.num_scales):
            factor = 2 ** level
            pyramid.append(F.interpolate(x, size=(height // factor, width // factor),
                                         mode='bilinear', align_corners=False))
        return super().forward(pyramid)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
class GeneratorMultiToSingleOutputMixin:
    """Reduces a multi-output generator forward() to its first (HR) output."""

    def forward(self, x):
        outputs = super().forward(x)
        return outputs[0]
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class DiscriminatorMultiToSingleOutputMixin:
    """Collapses per-scale (prediction, features) pairs into the first
    prediction plus one flat list of all features."""

    def forward(self, x):
        scale_results = super().forward(x)
        flat_feats = []
        for _, feat_list in scale_results:
            flat_feats.extend(feat_list)
        return scale_results[0][0], flat_feats
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class DiscriminatorMultiToSingleOutputStackedMixin:
    """Collapses per-scale (prediction, features) pairs into one prediction map
    (all scales upsampled to the HR size and concatenated on channels) plus a
    flat feature list, optionally restricted to selected scale indices."""

    def __init__(self, *args, return_feats_only_levels=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Indices of scales whose features to return; None keeps every level.
        self.return_feats_only_levels = return_feats_only_levels

    def forward(self, x):
        out_feat_tuples = super().forward(x)
        predictions = [pred for pred, _ in out_feat_tuples]
        # Resize lower-resolution predictions to the HR map and stack on channels.
        target_size = predictions[0].shape[-2:]
        resized = [predictions[0]]
        for pred in predictions[1:]:
            resized.append(F.interpolate(pred, size=target_size,
                                         mode='bilinear', align_corners=False))
        stacked = torch.cat(resized, dim=1)
        if self.return_feats_only_levels is not None:
            kept_lists = [out_feat_tuples[level][1] for level in self.return_feats_only_levels]
        else:
            kept_lists = [feat_list for _, feat_list in out_feat_tuples]
        flat_feats = [f for feat_list in kept_lists for f in feat_list]
        return stacked, flat_feats
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
class MultiscaleDiscrSingleInput(SingleToMultiScaleInputMixin, DiscriminatorMultiToSingleOutputStackedMixin, MultiscaleDiscriminatorSimple):
    # Multiscale discriminator with a single-tensor interface: the input mixin
    # builds the resolution pyramid, the stacked-output mixin collapses the
    # per-scale results into one prediction map plus a flat feature list.
    pass
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class MultiscaleResNetSingle(GeneratorMultiToSingleOutputMixin, SingleToMultiScaleInputMixin, MultiscaleResNet):
    # MultiscaleResNet with a single-tensor interface: the input mixin builds
    # the resolution pyramid and the generator mixin keeps only the first
    # (highest-resolution) output.
    pass
|
RAVE-main/annotator/lama/saicinpainting/training/modules/pix2pixhd.py
ADDED
|
@@ -0,0 +1,669 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# original: https://github.com/NVIDIA/pix2pixHD/blob/master/models/networks.py
|
| 2 |
+
import collections
|
| 3 |
+
from functools import partial
|
| 4 |
+
import functools
|
| 5 |
+
import logging
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
|
| 11 |
+
from annotator.lama.saicinpainting.training.modules.base import BaseDiscriminator, deconv_factory, get_conv_block_ctor, get_norm_layer, get_activation
|
| 12 |
+
from annotator.lama.saicinpainting.training.modules.ffc import FFCResnetBlock
|
| 13 |
+
from annotator.lama.saicinpainting.training.modules.multidilated_conv import MultidilatedConv
|
| 14 |
+
|
| 15 |
+
class DotDict(defaultdict):
    # https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary
    """dot.notation access to dictionary attributes"""
    # NOTE(review): attribute reads go through dict.get, so a missing key
    # yields None rather than invoking the defaultdict default_factory —
    # confirm callers rely on the None behavior.
    __getattr__ = defaultdict.get
    __setattr__ = defaultdict.__setitem__
    __delattr__ = defaultdict.__delitem__
|
| 21 |
+
|
| 22 |
+
class Identity(nn.Module):
    """No-op module: returns its input unchanged."""

    # nn.Module's default __init__ suffices; no explicit constructor needed.
    def forward(self, x):
        return x
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class ResnetBlock(nn.Module):
    """3x3 residual block with configurable padding, dilation, groups and conv kind.

    When ``in_dim`` is given, the skip path projects the input to ``dim``
    channels with a 1x1 conv, while the conv branch consumes the raw input.
    """

    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default',
                 dilation=1, in_dim=None, groups=1, second_dilation=None):
        super(ResnetBlock, self).__init__()
        self.in_dim = in_dim
        self.dim = dim
        # The second conv reuses the first conv's dilation unless overridden.
        if second_dilation is None:
            second_dilation = dilation
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout,
                                                conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups,
                                                second_dilation=second_dilation)

        if self.in_dim is not None:
            # 1x1 projection so the skip connection matches the branch channels.
            self.input_conv = nn.Conv2d(in_dim, dim, 1)

        # NOTE: attribute keeps the original (misspelled) name for compatibility.
        self.out_channnels = dim

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default',
                         dilation=1, in_dim=None, groups=1, second_dilation=1):
        conv_layer = get_conv_block_ctor(conv_kind)

        conv_block = []
        # Explicit padding layer for 'reflect'/'replicate'; 'zero' pads in the conv.
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(dilation)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(dilation)]
        elif padding_type == 'zero':
            p = dilation
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        if in_dim is None:
            in_dim = dim

        conv_block += [conv_layer(in_dim, dim, kernel_size=3, padding=p, dilation=dilation),
                       norm_layer(dim),
                       activation]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        # Second conv: same padding scheme, optional groups; no trailing activation.
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(second_dilation)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(second_dilation)]
        elif padding_type == 'zero':
            p = second_dilation
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [conv_layer(dim, dim, kernel_size=3, padding=p, dilation=second_dilation, groups=groups),
                       norm_layer(dim)]

        return nn.Sequential(*conv_block)

    def forward(self, x):
        x_before = x
        if self.in_dim is not None:
            # Only the skip path is projected; the conv branch sees the raw input.
            x = self.input_conv(x)
        out = x + self.conv_block(x_before)
        return out
|
| 91 |
+
|
| 92 |
+
class ResnetBlock5x5(nn.Module):
    """5x5 variant of ResnetBlock: identical structure, but the kernel is 5 so
    every padding amount is doubled (2 * dilation) to keep spatial size."""

    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default',
                 dilation=1, in_dim=None, groups=1, second_dilation=None):
        super(ResnetBlock5x5, self).__init__()
        self.in_dim = in_dim
        self.dim = dim
        # The second conv reuses the first conv's dilation unless overridden.
        if second_dilation is None:
            second_dilation = dilation
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout,
                                                conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups,
                                                second_dilation=second_dilation)

        if self.in_dim is not None:
            # 1x1 projection so the skip connection matches the branch channels.
            self.input_conv = nn.Conv2d(in_dim, dim, 1)

        # NOTE: attribute keeps the original (misspelled) name for compatibility.
        self.out_channnels = dim

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default',
                         dilation=1, in_dim=None, groups=1, second_dilation=1):
        conv_layer = get_conv_block_ctor(conv_kind)

        conv_block = []
        # Padding is dilation * 2 because the kernel is 5x5.
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(dilation * 2)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(dilation * 2)]
        elif padding_type == 'zero':
            p = dilation * 2
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        if in_dim is None:
            in_dim = dim

        conv_block += [conv_layer(in_dim, dim, kernel_size=5, padding=p, dilation=dilation),
                       norm_layer(dim),
                       activation]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        # Second conv: same doubled-padding scheme; no trailing activation.
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(second_dilation * 2)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(second_dilation * 2)]
        elif padding_type == 'zero':
            p = second_dilation * 2
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [conv_layer(dim, dim, kernel_size=5, padding=p, dilation=second_dilation, groups=groups),
                       norm_layer(dim)]

        return nn.Sequential(*conv_block)

    def forward(self, x):
        x_before = x
        if self.in_dim is not None:
            # Only the skip path is projected; the conv branch sees the raw input.
            x = self.input_conv(x)
        out = x + self.conv_block(x_before)
        return out
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class MultidilatedResnetBlock(nn.Module):
    """Residual block whose two 3x3 convolutions come from a caller-supplied
    (typically multi-dilated) conv constructor; output = x + conv_block(x)."""

    def __init__(self, dim, padding_type, conv_layer, norm_layer, activation=nn.ReLU(True), use_dropout=False):
        super().__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, conv_layer, norm_layer, activation, use_dropout)

    def build_conv_block(self, dim, padding_type, conv_layer, norm_layer, activation, use_dropout, dilation=1):
        # conv -> norm -> activation [-> dropout] -> conv -> norm
        layers = [
            conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type),
            norm_layer(dim),
            activation,
        ]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        layers.append(conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type))
        layers.append(norm_layer(dim))
        return nn.Sequential(*layers)

    def forward(self, x):
        return x + self.conv_block(x)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
class MultiDilatedGlobalGenerator(nn.Module):
    """pix2pixHD-style global generator whose bottleneck uses multi-dilated
    residual blocks, optionally preceded by FFC residual blocks at the given
    positions. Layout: 7x7 stem -> strided downsampling -> bottleneck ->
    upsampling -> 7x7 output head (+ optional output activation)."""

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3,
                 n_blocks=3, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default',
                 deconv_kind='convtranspose', activation=nn.ReLU(True),
                 up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True),
                 add_out_act=True, max_features=1024, multidilation_kwargs={},
                 ffc_positions=None, ffc_kwargs={}):
        assert (n_blocks >= 0)
        super().__init__()

        conv_layer = get_conv_block_ctor(conv_kind)
        # Bottleneck blocks always use the 'multidilated' conv constructor.
        resnet_conv_layer = functools.partial(get_conv_block_ctor('multidilated'), **multidilation_kwargs)
        norm_layer = get_norm_layer(norm_layer)
        if affine is not None:
            norm_layer = partial(norm_layer, affine=affine)
        up_norm_layer = get_norm_layer(up_norm_layer)
        if affine is not None:
            up_norm_layer = partial(up_norm_layer, affine=affine)

        # Stem: 7x7 conv on a reflection-padded input.
        model = [nn.ReflectionPad2d(3),
                 conv_layer(input_nc, ngf, kernel_size=7, padding=0),
                 norm_layer(ngf),
                 activation]

        identity = Identity()
        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i

            # Channels double per stage, capped at max_features.
            model += [conv_layer(min(max_features, ngf * mult),
                                 min(max_features, ngf * mult * 2),
                                 kernel_size=3, stride=2, padding=1),
                      norm_layer(min(max_features, ngf * mult * 2)),
                      activation]

        mult = 2 ** n_downsampling
        feats_num_bottleneck = min(max_features, ngf * mult)

        ### resnet blocks
        for i in range(n_blocks):
            # An FFC block is inserted before the i-th multidilated block when listed.
            if ffc_positions is not None and i in ffc_positions:
                model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU,
                                         inline=True, **ffc_kwargs)]
            model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type,
                                              conv_layer=resnet_conv_layer, activation=activation,
                                              norm_layer=norm_layer)]

        ### upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features)
        # Output head: 7x7 conv back to output_nc channels; optional activation
        # ('tanh' when add_out_act is True, otherwise the named activation).
        model += [nn.ReflectionPad2d(3),
                  nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation('tanh' if add_out_act is True else add_out_act))
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
|
| 238 |
+
|
| 239 |
+
class ConfigGlobalGenerator(nn.Module):
    """pix2pixHD global generator whose bottleneck is assembled from a list of
    block specs (``manual_block_spec``), each selecting a resnet block kind,
    conv kind and dilation; with an empty spec it falls back to ``n_blocks``
    multidilated blocks built from the constructor arguments."""

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3,
                 n_blocks=3, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default',
                 deconv_kind='convtranspose', activation=nn.ReLU(True),
                 up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True),
                 add_out_act=True, max_features=1024,
                 manual_block_spec=[],
                 resnet_block_kind='multidilatedresnetblock',
                 resnet_conv_kind='multidilated',
                 resnet_dilation=1,
                 multidilation_kwargs={}):
        assert (n_blocks >= 0)
        super().__init__()

        conv_layer = get_conv_block_ctor(conv_kind)
        resnet_conv_layer = functools.partial(get_conv_block_ctor(resnet_conv_kind), **multidilation_kwargs)
        norm_layer = get_norm_layer(norm_layer)
        if affine is not None:
            norm_layer = partial(norm_layer, affine=affine)
        up_norm_layer = get_norm_layer(up_norm_layer)
        if affine is not None:
            up_norm_layer = partial(up_norm_layer, affine=affine)

        # Stem: 7x7 conv on a reflection-padded input.
        model = [nn.ReflectionPad2d(3),
                 conv_layer(input_nc, ngf, kernel_size=7, padding=0),
                 norm_layer(ngf),
                 activation]

        identity = Identity()

        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i
            model += [conv_layer(min(max_features, ngf * mult),
                                 min(max_features, ngf * mult * 2),
                                 kernel_size=3, stride=2, padding=1),
                      norm_layer(min(max_features, ngf * mult * 2)),
                      activation]

        mult = 2 ** n_downsampling
        feats_num_bottleneck = min(max_features, ngf * mult)

        # Default spec: n_blocks blocks built from the constructor arguments.
        if len(manual_block_spec) == 0:
            manual_block_spec = [
                DotDict(lambda : None, {
                    'n_blocks': n_blocks,
                    'use_default': True})
            ]

        ### resnet blocks
        # Bug fix: the inner function previously assigned the resnet_* names only
        # in its "custom spec" branch, which made them function-local throughout
        # its body; with use_default=True they were then read while unbound,
        # raising UnboundLocalError. Binding the enclosing values as keyword
        # defaults keeps them readable in both branches while per-spec overrides
        # still work (the assignments simply rebind the locals).
        for block_spec in manual_block_spec:
            def make_and_add_blocks(model, block_spec,
                                    resnet_conv_layer=resnet_conv_layer,
                                    resnet_conv_kind=resnet_conv_kind,
                                    resnet_block_kind=resnet_block_kind,
                                    resnet_dilation=resnet_dilation):
                block_spec = DotDict(lambda : None, block_spec)
                if not block_spec.use_default:
                    # Per-spec overrides of the default block configuration.
                    resnet_conv_layer = functools.partial(get_conv_block_ctor(block_spec.resnet_conv_kind),
                                                          **block_spec.multidilation_kwargs)
                    resnet_conv_kind = block_spec.resnet_conv_kind
                    resnet_block_kind = block_spec.resnet_block_kind
                    if block_spec.resnet_dilation is not None:
                        resnet_dilation = block_spec.resnet_dilation
                for i in range(block_spec.n_blocks):
                    if resnet_block_kind == "multidilatedresnetblock":
                        model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type,
                                                          conv_layer=resnet_conv_layer, activation=activation,
                                                          norm_layer=norm_layer)]
                    if resnet_block_kind == "resnetblock":
                        model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,
                                              conv_kind=resnet_conv_kind)]
                    if resnet_block_kind == "resnetblock5x5":
                        model += [ResnetBlock5x5(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,
                                                 conv_kind=resnet_conv_kind)]
                    if resnet_block_kind == "resnetblockdwdil":
                        model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer,
                                              conv_kind=resnet_conv_kind, dilation=resnet_dilation, second_dilation=resnet_dilation)]
            make_and_add_blocks(model, block_spec)

        ### upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features)
        model += [nn.ReflectionPad2d(3),
                  nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation('tanh' if add_out_act is True else add_out_act))
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs):
    """Build `dilated_blocks_n` dilated residual blocks.

    'simple' yields ResnetBlocks whose dilation doubles per block (2, 4, 8, ...);
    'multi' yields MultidilatedResnetBlocks with the kwargs unchanged.
    Raises ValueError for any other kind (only if at least one block is built,
    matching the original behavior).
    """
    blocks = []
    for idx in range(dilated_blocks_n):
        if dilation_block_kind == 'simple':
            block = ResnetBlock(**dilated_block_kwargs, dilation=2 ** (idx + 1))
        elif dilation_block_kind == 'multi':
            block = MultidilatedResnetBlock(**dilated_block_kwargs)
        else:
            raise ValueError(f'dilation_block_kind could not be "{dilation_block_kind}"')
        blocks.append(block)
    return blocks
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
class GlobalGenerator(nn.Module):
    """pix2pixHD global generator with optional dilated blocks at the start,
    middle and end of the bottleneck, optional FFC blocks at given positions,
    and optional depthwise residual blocks. Layout: 7x7 stem -> strided
    downsampling -> bottleneck -> transposed-conv upsampling -> 7x7 head."""

    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
                 padding_type='reflect', conv_kind='default', activation=nn.ReLU(True),
                 up_norm_layer=nn.BatchNorm2d, affine=None,
                 up_activation=nn.ReLU(True), dilated_blocks_n=0, dilated_blocks_n_start=0,
                 dilated_blocks_n_middle=0,
                 add_out_act=True,
                 max_features=1024, is_resblock_depthwise=False,
                 ffc_positions=None, ffc_kwargs={}, dilation=1, second_dilation=None,
                 dilation_block_kind='simple', multidilation_kwargs={}):
        assert (n_blocks >= 0)
        super().__init__()

        conv_layer = get_conv_block_ctor(conv_kind)
        norm_layer = get_norm_layer(norm_layer)
        if affine is not None:
            norm_layer = partial(norm_layer, affine=affine)
        up_norm_layer = get_norm_layer(up_norm_layer)
        if affine is not None:
            up_norm_layer = partial(up_norm_layer, affine=affine)

        if ffc_positions is not None:
            # Counter lets a position listed k times insert k FFC blocks there.
            ffc_positions = collections.Counter(ffc_positions)

        model = [nn.ReflectionPad2d(3),
                 conv_layer(input_nc, ngf, kernel_size=7, padding=0),
                 norm_layer(ngf),
                 activation]

        identity = Identity()
        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i

            # Channels double per stage, capped at max_features.
            model += [conv_layer(min(max_features, ngf * mult),
                                 min(max_features, ngf * mult * 2),
                                 kernel_size=3, stride=2, padding=1),
                      norm_layer(min(max_features, ngf * mult * 2)),
                      activation]

        mult = 2 ** n_downsampling
        feats_num_bottleneck = min(max_features, ngf * mult)

        # Shared kwargs for the dilated blocks; the conv choice depends on kind.
        dilated_block_kwargs = dict(dim=feats_num_bottleneck, padding_type=padding_type,
                                    activation=activation, norm_layer=norm_layer)
        if dilation_block_kind == 'simple':
            dilated_block_kwargs['conv_kind'] = conv_kind
        elif dilation_block_kind == 'multi':
            dilated_block_kwargs['conv_layer'] = functools.partial(
                get_conv_block_ctor('multidilated'), **multidilation_kwargs)

        # dilated blocks at the start of the bottleneck sausage
        if dilated_blocks_n_start is not None and dilated_blocks_n_start > 0:
            model += make_dil_blocks(dilated_blocks_n_start, dilation_block_kind, dilated_block_kwargs)

        # resnet blocks
        for i in range(n_blocks):
            # dilated blocks at the middle of the bottleneck sausage
            if i == n_blocks // 2 and dilated_blocks_n_middle is not None and dilated_blocks_n_middle > 0:
                model += make_dil_blocks(dilated_blocks_n_middle, dilation_block_kind, dilated_block_kwargs)

            if ffc_positions is not None and i in ffc_positions:
                for _ in range(ffc_positions[i]):  # same position can occur more than once
                    model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU,
                                             inline=True, **ffc_kwargs)]

            if is_resblock_depthwise:
                # Depthwise variant: one conv group per channel.
                resblock_groups = feats_num_bottleneck
            else:
                resblock_groups = 1

            model += [ResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation=activation,
                                  norm_layer=norm_layer, conv_kind=conv_kind, groups=resblock_groups,
                                  dilation=dilation, second_dilation=second_dilation)]


        # dilated blocks at the end of the bottleneck sausage
        if dilated_blocks_n is not None and dilated_blocks_n > 0:
            model += make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs)

        # upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(min(max_features, ngf * mult),
                                         min(max_features, int(ngf * mult / 2)),
                                         kernel_size=3, stride=2, padding=1, output_padding=1),
                      up_norm_layer(min(max_features, int(ngf * mult / 2))),
                      up_activation]
        # Output head: 7x7 conv back to output_nc; optional final activation
        # ('tanh' when add_out_act is True, otherwise the named activation).
        model += [nn.ReflectionPad2d(3),
                  nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        if add_out_act:
            model.append(get_activation('tanh' if add_out_act is True else add_out_act))
        self.model = nn.Sequential(*model)

    def forward(self, input):
        return self.model(input)
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
class GlobalGeneratorGated(GlobalGenerator):
    """GlobalGenerator preconfigured for gated convolutions; any explicitly
    passed keyword argument overrides these defaults."""

    def __init__(self, *args, **kwargs):
        merged = dict(
            conv_kind='gated_bn_relu',
            activation=nn.Identity(),
            norm_layer=nn.Identity,
        )
        merged.update(kwargs)
        super().__init__(*args, **merged)
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
class GlobalGeneratorFromSuperChannels(nn.Module):
    """pix2pixHD-style generator whose per-stage widths come from a
    super-network channel search result (``super_channels``).

    ``convert_super_channels`` expands the compact super-channel list into
    one width per layer; downsampling, three resnet groups and upsampling
    then consume consecutive entries of that list.
    """

    def __init__(self, input_nc, output_nc, n_downsampling, n_blocks, super_channels, norm_layer="bn", padding_type='reflect', add_out_act=True):
        super().__init__()
        self.n_downsampling = n_downsampling
        norm_layer = get_norm_layer(norm_layer)
        # InstanceNorm has no affine bias by default, so convs keep their own bias.
        if type(norm_layer) == functools.partial:
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)

        channels = self.convert_super_channels(super_channels)
        self.channels = channels

        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, channels[0], kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(channels[0]),
                 nn.ReLU(True)]

        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i  # NOTE(review): unused here — widths come from `channels`
            model += [nn.Conv2d(channels[0+i], channels[1+i], kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(channels[1+i]),
                      nn.ReLU(True)]

        mult = 2 ** n_downsampling  # NOTE(review): also unused

        # Bottleneck resnet blocks are split in three groups; groups 2 and 3
        # may change width, so their first block gets an `in_dim` adapter.
        n_blocks1 = n_blocks // 3
        n_blocks2 = n_blocks1
        n_blocks3 = n_blocks - n_blocks1 - n_blocks2

        for i in range(n_blocks1):
            c = n_downsampling
            dim = channels[c]
            model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer)]

        for i in range(n_blocks2):
            c = n_downsampling+1
            dim = channels[c]
            kwargs = {}
            if i == 0:
                kwargs = {"in_dim": channels[c-1]}
            model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer, **kwargs)]

        for i in range(n_blocks3):
            c = n_downsampling+2
            dim = channels[c]
            kwargs = {}
            if i == 0:
                kwargs = {"in_dim": channels[c-1]}
            model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer, **kwargs)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(channels[n_downsampling+3+i],
                                         channels[n_downsampling+3+i+1],
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(channels[n_downsampling+3+i+1]),
                      nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(channels[2*n_downsampling+3], output_nc, kernel_size=7, padding=0)]

        if add_out_act:
            model.append(get_activation('tanh' if add_out_act is True else add_out_act))
        self.model = nn.Sequential(*model)

    def convert_super_channels(self, super_channels):
        """Expand the compact super-channel spec into one width per stage.

        The magic indices (N1, [1,4,7,10], [22,25,28]) encode positions in
        the flattened super-net layer list for 2 or 3 downsampling stages.
        """
        n_downsampling = self.n_downsampling
        result = []
        cnt = 0

        if n_downsampling == 2:
            N1 = 10
        elif n_downsampling == 3:
            N1 = 13
        else:
            raise NotImplementedError

        for i in range(0, N1):
            if i in [1,4,7,10]:
                channel = super_channels[cnt] * (2 ** cnt)
                config = {'channel': channel}  # NOTE(review): unused local
                result.append(channel)
                logging.info(f"Downsample channels {result[-1]}")
                cnt += 1

        for i in range(3):
            for counter, j in enumerate(range(N1 + i * 3, N1 + 3 + i * 3)):
                if len(super_channels) == 6:
                    channel = super_channels[3] * 4
                else:
                    channel = super_channels[i + 3] * 4
                config = {'channel': channel}  # NOTE(review): unused local
                if counter == 0:
                    result.append(channel)
                    logging.info(f"Bottleneck channels {result[-1]}")
        cnt = 2

        # NOTE(review): [22, 25, 28] assumes N1 == 13 offsets; for
        # n_downsampling == 2 (N1 == 10) the same absolute indices are hit at
        # different relative positions, and cnt can reach -1 on the third hit,
        # indexing super_channels[5 - cnt] past a length-6 list — confirm the
        # configs actually used never exercise that path.
        for i in range(N1+9, N1+21):
            if i in [22, 25,28]:
                cnt -= 1
                if len(super_channels) == 6:
                    channel = super_channels[5 - cnt] * (2 ** cnt)
                else:
                    channel = super_channels[7 - cnt] * (2 ** cnt)
                result.append(int(channel))
                logging.info(f"Upsample channels {result[-1]}")
        return result

    def forward(self, input):
        """Run the assembled generator on a batch of images."""
        return self.model(input)
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
# Defines the PatchGAN discriminator with the specified arguments.
|
| 565 |
+
class NLayerDiscriminator(BaseDiscriminator):
    """PatchGAN discriminator (pix2pixHD style).

    Each stage is stored as a separate ``model0`` .. ``model{n_layers+1}``
    submodule so intermediate activations can be captured for
    feature-matching losses.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,):
        super().__init__()
        self.n_layers = n_layers

        # 4x4 convs; padding chosen so stride-2 stages halve spatial size.
        kw = 4
        padw = int(np.ceil((kw-1.0)/2))
        sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                     nn.LeakyReLU(0.2, True)]]

        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)  # double channels per stage, cap at 512

            cur_model = []
            cur_model += [
                nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
                norm_layer(nf),
                nn.LeakyReLU(0.2, True)
            ]
            sequence.append(cur_model)

        # Final feature stage keeps spatial size (stride 1).
        nf_prev = nf
        nf = min(nf * 2, 512)

        cur_model = []
        cur_model += [
            nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
            norm_layer(nf),
            nn.LeakyReLU(0.2, True)
        ]
        sequence.append(cur_model)

        # 1-channel patch score map.
        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        # Register each stage individually (model0, model1, ...).
        for n in range(len(sequence)):
            setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))

    def get_all_activations(self, x):
        """Return the output of every stage, first stage to score map."""
        res = [x]
        for n in range(self.n_layers + 2):
            model = getattr(self, 'model' + str(n))
            res.append(model(res[-1]))
        return res[1:]

    def forward(self, x):
        """Return (patch score map, list of intermediate activations)."""
        act = self.get_all_activations(x)
        return act[-1], act[:-1]
|
| 614 |
+
|
| 615 |
+
|
| 616 |
+
class MultidilatedNLayerDiscriminator(BaseDiscriminator):
    """PatchGAN discriminator whose stride-2 stages use MultidilatedConv.

    Mirrors NLayerDiscriminator but replaces the middle downsampling convs
    with multi-dilation convolutions; stages are registered separately so
    intermediate activations are available for feature matching.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, multidilation_kwargs={}):
        super().__init__()
        self.n_layers = n_layers

        kw = 4
        padw = int(np.ceil((kw-1.0)/2))
        sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
                     nn.LeakyReLU(0.2, True)]]

        nf = ndf
        for n in range(1, n_layers):
            nf_prev = nf
            nf = min(nf * 2, 512)

            cur_model = []
            cur_model += [
                # NOTE(review): padding=[2, 3] is hard-coded per dilation
                # branch — assumes MultidilatedConv's two-branch default;
                # confirm against multidilated_conv.py.
                MultidilatedConv(nf_prev, nf, kernel_size=kw, stride=2, padding=[2, 3], **multidilation_kwargs),
                norm_layer(nf),
                nn.LeakyReLU(0.2, True)
            ]
            sequence.append(cur_model)

        # Final feature stage keeps spatial size (stride 1).
        nf_prev = nf
        nf = min(nf * 2, 512)

        cur_model = []
        cur_model += [
            nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
            norm_layer(nf),
            nn.LeakyReLU(0.2, True)
        ]
        sequence.append(cur_model)

        # 1-channel patch score map.
        sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]

        for n in range(len(sequence)):
            setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))

    def get_all_activations(self, x):
        """Return the output of every stage, first stage to score map."""
        res = [x]
        for n in range(self.n_layers + 2):
            model = getattr(self, 'model' + str(n))
            res.append(model(res[-1]))
        return res[1:]

    def forward(self, x):
        """Return (patch score map, list of intermediate activations)."""
        act = self.get_all_activations(x)
        return act[-1], act[:-1]
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
class NLayerDiscriminatorAsGen(NLayerDiscriminator):
    """NLayerDiscriminator exposed with a generator-like interface.

    Keeps only the final patch score map and discards the list of
    intermediate activations that the parent's forward() also returns.
    """

    def forward(self, x):
        score_map, _activations = super().forward(x)
        return score_map
|
RAVE-main/annotator/lama/saicinpainting/training/modules/spatial_transform.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from kornia.geometry.transform import rotate
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class LearnableSpatialTransformWrapper(nn.Module):
    """Runs a wrapped module on a reflect-padded, rotated view of its input,
    then rotates back and crops to the original spatial size.

    The rotation angle is drawn uniformly from [0, angle_init_range) and,
    when ``train_angle`` is True, registered as a learnable nn.Parameter.
    """

    def __init__(self, impl, pad_coef=0.5, angle_init_range=80, train_angle=True):
        super().__init__()
        self.impl = impl
        # Angle in degrees (kornia's rotate expects degrees).
        self.angle = torch.rand(1) * angle_init_range
        if train_angle:
            self.angle = nn.Parameter(self.angle, requires_grad=True)
        self.pad_coef = pad_coef

    def forward(self, x):
        # Accepts a single tensor or a tuple of tensors (e.g. a two-branch
        # feature pair); each element gets transform -> impl -> inverse.
        if torch.is_tensor(x):
            return self.inverse_transform(self.impl(self.transform(x)), x)
        elif isinstance(x, tuple):
            x_trans = tuple(self.transform(elem) for elem in x)
            y_trans = self.impl(x_trans)
            return tuple(self.inverse_transform(elem, orig_x) for elem, orig_x in zip(y_trans, x))
        else:
            raise ValueError(f'Unexpected input type {type(x)}')

    def transform(self, x):
        # Reflect-pad by pad_coef * size so rotation does not push real
        # content outside the frame, then rotate by the (learned) angle.
        height, width = x.shape[2:]
        pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef)
        x_padded = F.pad(x, [pad_w, pad_w, pad_h, pad_h], mode='reflect')
        x_padded_rotated = rotate(x_padded, angle=self.angle.to(x_padded))
        return x_padded_rotated

    def inverse_transform(self, y_padded_rotated, orig_x):
        # Undo the rotation, then crop the padding off.  Assumes impl
        # preserves spatial dims so the crop recovers orig_x's size.
        height, width = orig_x.shape[2:]
        pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef)

        y_padded = rotate(y_padded_rotated, angle=-self.angle.to(y_padded_rotated))
        y_height, y_width = y_padded.shape[2:]
        y = y_padded[:, :, pad_h : y_height - pad_h, pad_w : y_width - pad_w]
        return y
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# Smoke test: with an identity impl, transform + inverse_transform should
# reproduce the input up to interpolation error at the borders.
if __name__ == '__main__':
    layer = LearnableSpatialTransformWrapper(nn.Identity())
    x = torch.arange(2* 3 * 15 * 15).view(2, 3, 15, 15).float()
    y = layer(x)
    assert x.shape == y.shape
    # Compare interiors only; rotation resampling can perturb border pixels.
    assert torch.allclose(x[:, :, 1:, 1:][:, :, :-1, :-1], y[:, :, 1:, 1:][:, :, :-1, :-1])
    print('all ok')
|
RAVE-main/annotator/lama/saicinpainting/training/modules/squeeze_excitation.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools each channel, feeds the pooled vector through a
    two-layer bottleneck MLP with a sigmoid gate, and rescales the input's
    channels by the resulting per-channel weights in (0, 1).
    """

    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, chans = x.size(0), x.size(1)
        # Squeeze: per-channel global average -> (B, C).
        squeezed = self.avg_pool(x).view(batch, chans)
        # Excite: bottleneck MLP + sigmoid -> per-channel gate.
        gate = self.fc(squeezed).view(batch, chans, 1, 1)
        # Rescale every spatial location of each channel by its gate.
        return x * gate.expand_as(x)
|
RAVE-main/annotator/lama/saicinpainting/training/trainers/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import torch
|
| 3 |
+
from annotator.lama.saicinpainting.training.trainers.default import DefaultInpaintingTrainingModule
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def get_training_model_class(kind):
    """Map a trainer-kind string to its training-module class.

    Only 'default' is supported in this trimmed build; any other kind
    raises ValueError.
    """
    if kind != 'default':
        raise ValueError(f'Unknown trainer module {kind}')
    return DefaultInpaintingTrainingModule
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def make_training_model(config):
    """Instantiate the training module described by config.training_model.

    Every key under config.training_model except 'kind' is forwarded as a
    keyword argument; 'use_ddp' is derived from the trainer accelerator.
    """
    module_kwargs = dict(config.training_model)
    model_kind = module_kwargs.pop('kind')
    accelerator = config.trainer.kwargs.get('accelerator', None)
    module_kwargs['use_ddp'] = accelerator == 'ddp'

    logging.info(f'Make training model {model_kind}')

    model_class = get_training_model_class(model_kind)
    return model_class(config, **module_kwargs)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def load_checkpoint(train_config, path, map_location='cuda', strict=True):
    """Build the generator described by train_config and load weights.

    Returns only the generator submodule of the training model, with the
    state dict from `path` loaded into it.
    """
    generator = make_training_model(train_config).generator
    checkpoint_state = torch.load(path, map_location=map_location)
    generator.load_state_dict(checkpoint_state, strict=strict)
    return generator
|
RAVE-main/annotator/lama/saicinpainting/training/trainers/base.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import logging
|
| 3 |
+
from typing import Dict, Tuple
|
| 4 |
+
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import pytorch_lightning as ptl
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
import torch.nn.functional as F
|
| 10 |
+
# from torch.utils.data import DistributedSampler
|
| 11 |
+
|
| 12 |
+
# from annotator.lama.saicinpainting.evaluation import make_evaluator
|
| 13 |
+
# from annotator.lama.saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader
|
| 14 |
+
# from annotator.lama.saicinpainting.training.losses.adversarial import make_discrim_loss
|
| 15 |
+
# from annotator.lama.saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL
|
| 16 |
+
from annotator.lama.saicinpainting.training.modules import make_generator #, make_discriminator
|
| 17 |
+
# from annotator.lama.saicinpainting.training.visualizers import make_visualizer
|
| 18 |
+
from annotator.lama.saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \
|
| 19 |
+
get_has_ddp_rank
|
| 20 |
+
|
| 21 |
+
LOGGER = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def make_optimizer(parameters, kind='adamw', **kwargs):
    """Construct a torch optimizer ('adam' or 'adamw') over `parameters`.

    Extra keyword arguments (lr, betas, weight_decay, ...) are passed
    straight through to the optimizer constructor; any other kind raises
    ValueError.
    """
    known_kinds = {'adam': torch.optim.Adam, 'adamw': torch.optim.AdamW}
    if kind not in known_kinds:
        raise ValueError(f'Unknown optimizer kind {kind}')
    return known_kinds[kind](parameters, **kwargs)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999):
    """In-place EMA update: result <- decay * result + (1 - decay) * new.

    Parameters are matched by name; `result` is mutated in place and the
    new-iterate model is only read.
    """
    with torch.no_grad():
        source = dict(new_iterate_model.named_parameters())
        for name, averaged in result.named_parameters():
            averaged.data.mul_(decay).add_(source[name].data, alpha=1 - decay)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'):
    """Sample `scales` gaussian noise maps at successively halved
    resolutions, upsample each back to base size and stack on channels.

    Returns a (batch, scales, H, W) tensor on base_tensor's device.
    """
    batch_size, _, height, width = base_tensor.shape
    # align_corners is only a valid argument for these interpolation modes.
    align_corners = False if scale_mode in ('bilinear', 'bicubic') else None
    layers = []
    level_h, level_w = height, width
    for _ in range(scales):
        noise = torch.randn(batch_size, 1, level_h, level_w, device=base_tensor.device)
        layers.append(F.interpolate(noise, size=(height, width),
                                    mode=scale_mode, align_corners=align_corners))
        level_h //= 2
        level_w //= 2
    return torch.cat(layers, dim=1)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class BaseInpaintingTrainingModule(ptl.LightningModule):
    """Base Lightning module for inpainting training.

    In this trimmed annotator build most of the training machinery
    (discriminator, losses, evaluators, visualizer) is commented out in
    __init__, so the module is effectively inference-only: it builds the
    generator and delegates forward() to subclasses.  Methods below that
    reference self.discriminator / self.visualizer / evaluators would raise
    AttributeError if invoked as-is — they are kept for parity with the
    upstream trainer.
    """

    def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100,
                 average_generator=False, generator_avg_beta=0.999, average_generator_start_step=30000,
                 average_generator_period=10, store_discr_outputs_for_vis=False,
                 **kwargs):
        super().__init__(*args, **kwargs)
        LOGGER.info('BaseInpaintingTrainingModule init called')

        self.config = config

        self.generator = make_generator(config, **self.config.generator)
        self.use_ddp = use_ddp

        if not get_has_ddp_rank():
            LOGGER.info(f'Generator\n{self.generator}')

        # Training-only setup, deliberately disabled in this build:
        # if not predict_only:
        #     self.save_hyperparameters(self.config)
        #     self.discriminator = make_discriminator(**self.config.discriminator)
        #     self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial)
        #     self.visualizer = make_visualizer(**self.config.visualizer)
        #     self.val_evaluator = make_evaluator(**self.config.evaluator)
        #     self.test_evaluator = make_evaluator(**self.config.evaluator)
        #
        #     if not get_has_ddp_rank():
        #         LOGGER.info(f'Discriminator\n{self.discriminator}')
        #
        #     extra_val = self.config.data.get('extra_val', ())
        #     if extra_val:
        #         self.extra_val_titles = list(extra_val)
        #         self.extra_evaluators = nn.ModuleDict({k: make_evaluator(**self.config.evaluator)
        #                                                for k in extra_val})
        #     else:
        #         self.extra_evaluators = {}
        #
        #     self.average_generator = average_generator
        #     self.generator_avg_beta = generator_avg_beta
        #     self.average_generator_start_step = average_generator_start_step
        #     self.average_generator_period = average_generator_period
        #     self.generator_average = None
        #     self.last_generator_averaging_step = -1
        #     self.store_discr_outputs_for_vis = store_discr_outputs_for_vis
        #
        #     if self.config.losses.get("l1", {"weight_known": 0})['weight_known'] > 0:
        #         self.loss_l1 = nn.L1Loss(reduction='none')
        #
        #     if self.config.losses.get("mse", {"weight": 0})['weight'] > 0:
        #         self.loss_mse = nn.MSELoss(reduction='none')
        #
        #     if self.config.losses.perceptual.weight > 0:
        #         self.loss_pl = PerceptualLoss()
        #
        #     # if self.config.losses.get("resnet_pl", {"weight": 0})['weight'] > 0:
        #     #     self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl)
        #     # else:
        #     #     self.loss_resnet_pl = None
        #
        #     self.loss_resnet_pl = None

        self.visualize_each_iters = visualize_each_iters
        LOGGER.info('BaseInpaintingTrainingModule init done')

    def configure_optimizers(self):
        """One optimizer each for generator and discriminator.

        NOTE(review): self.discriminator is only assigned in the
        commented-out init block above — this would fail in this build.
        """
        discriminator_params = list(self.discriminator.parameters())
        return [
            dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)),
            dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator)),
        ]

    def train_dataloader(self):
        # NOTE(review): `kwargs` (including ddp_kwargs) is built but never
        # used — the call below passes self.config.data.train directly, so
        # the DDP sampler settings are silently dropped.  Also the
        # make_default_train_dataloader import is commented out at file top.
        kwargs = dict(self.config.data.train)
        if self.use_ddp:
            kwargs['ddp_kwargs'] = dict(num_replicas=self.trainer.num_nodes * self.trainer.num_processes,
                                        rank=self.trainer.global_rank,
                                        shuffle=True)
        dataloader = make_default_train_dataloader(**self.config.data.train)
        return dataloader

    def val_dataloader(self):
        # Dataloader order matters: [0]=val, [1]=visual test (or val again
        # when no visual_test is configured), [2:]=extra validation sets.
        res = [make_default_val_dataloader(**self.config.data.val)]

        if self.config.data.visual_test is not None:
            res = res + [make_default_val_dataloader(**self.config.data.visual_test)]
        else:
            res = res + res

        extra_val = self.config.data.get('extra_val', ())
        if extra_val:
            res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles]

        return res

    def training_step(self, batch, batch_idx, optimizer_idx=None):
        self._is_training_step = True
        return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx)

    def validation_step(self, batch, batch_idx, dataloader_idx):
        # dataloader_idx maps onto the ordering built in val_dataloader().
        extra_val_key = None
        if dataloader_idx == 0:
            mode = 'val'
        elif dataloader_idx == 1:
            mode = 'test'
        else:
            mode = 'extra_val'
            extra_val_key = self.extra_val_titles[dataloader_idx - 2]
        self._is_training_step = False
        return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key)

    def training_step_end(self, batch_parts_outputs):
        # Periodically fold the current generator into its EMA copy.
        if self.training and self.average_generator \
                and self.global_step >= self.average_generator_start_step \
                and self.global_step >= self.last_generator_averaging_step + self.average_generator_period:
            if self.generator_average is None:
                self.generator_average = copy.deepcopy(self.generator)
            else:
                update_running_average(self.generator_average, self.generator, decay=self.generator_avg_beta)
            self.last_generator_averaging_step = self.global_step

        full_loss = (batch_parts_outputs['loss'].mean()
                     if torch.is_tensor(batch_parts_outputs['loss'])  # loss is not tensor when no discriminator used
                     else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True))
        log_info = {k: v.mean() for k, v in batch_parts_outputs['log_info'].items()}
        self.log_dict(log_info, on_step=True, on_epoch=False)
        return full_loss

    def validation_epoch_end(self, outputs):
        """Aggregate per-step logs and run the val/test/extra evaluators."""
        outputs = [step_out for out_group in outputs for step_out in out_group]
        averaged_logs = average_dicts(step_out['log_info'] for step_out in outputs)
        self.log_dict({k: v.mean() for k, v in averaged_logs.items()})

        pd.set_option('display.max_columns', 500)
        pd.set_option('display.width', 1000)

        # standard validation
        val_evaluator_states = [s['val_evaluator_state'] for s in outputs if 'val_evaluator_state' in s]
        val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states)
        val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0)
        val_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
        LOGGER.info(f'Validation metrics after epoch #{self.current_epoch}, '
                    f'total {self.global_step} iterations:\n{val_evaluator_res_df}')

        for k, v in flatten_dict(val_evaluator_res).items():
            self.log(f'val_{k}', v)

        # standard visual test
        test_evaluator_states = [s['test_evaluator_state'] for s in outputs
                                 if 'test_evaluator_state' in s]
        test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states)
        test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0)
        test_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
        LOGGER.info(f'Test metrics after epoch #{self.current_epoch}, '
                    f'total {self.global_step} iterations:\n{test_evaluator_res_df}')

        for k, v in flatten_dict(test_evaluator_res).items():
            self.log(f'test_{k}', v)

        # extra validations
        if self.extra_evaluators:
            for cur_eval_title, cur_evaluator in self.extra_evaluators.items():
                cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state'
                cur_states = [s[cur_state_key] for s in outputs if cur_state_key in s]
                cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states)
                cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0)
                cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True)
                LOGGER.info(f'Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, '
                            f'total {self.global_step} iterations:\n{cur_evaluator_res_df}')
                for k, v in flatten_dict(cur_evaluator_res).items():
                    self.log(f'extra_val_{cur_eval_title}_{k}', v)

    def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None):
        """Shared train/val step: run forward, compute the appropriate loss,
        optionally visualize, and collect evaluator state."""
        if optimizer_idx == 0:  # step for generator
            set_requires_grad(self.generator, True)
            set_requires_grad(self.discriminator, False)
        elif optimizer_idx == 1:  # step for discriminator
            set_requires_grad(self.generator, False)
            set_requires_grad(self.discriminator, True)

        batch = self(batch)

        total_loss = 0
        metrics = {}

        if optimizer_idx is None or optimizer_idx == 0:  # step for generator
            total_loss, metrics = self.generator_loss(batch)

        # NOTE(review): the `is None` in this elif is dead — None is always
        # consumed by the branch above, so only optimizer_idx == 1 reaches it.
        elif optimizer_idx is None or optimizer_idx == 1:  # step for discriminator
            if self.config.losses.adversarial.weight > 0:
                total_loss, metrics = self.discriminator_loss(batch)

        if self.get_ddp_rank() in (None, 0) and (batch_idx % self.visualize_each_iters == 0 or mode == 'test'):
            if self.config.losses.adversarial.weight > 0:
                if self.store_discr_outputs_for_vis:
                    with torch.no_grad():
                        self.store_discr_outputs(batch)
            vis_suffix = f'_{mode}'
            if mode == 'extra_val':
                vis_suffix += f'_{extra_val_key}'
            self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix)

        metrics_prefix = f'{mode}_'
        if mode == 'extra_val':
            metrics_prefix += f'{extra_val_key}_'
        result = dict(loss=total_loss, log_info=add_prefix_to_keys(metrics, metrics_prefix))
        if mode == 'val':
            result['val_evaluator_state'] = self.val_evaluator.process_batch(batch)
        elif mode == 'test':
            result['test_evaluator_state'] = self.test_evaluator.process_batch(batch)
        elif mode == 'extra_val':
            result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch)

        return result

    def get_current_generator(self, no_average=False):
        """Return the EMA generator at eval time when averaging is enabled,
        otherwise the live generator."""
        if not no_average and not self.training and self.average_generator and self.generator_average is not None:
            return self.generator_average
        return self.generator

    def forward(self, batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Pass data through generator and obtain at leas 'predicted_image' and 'inpainted' keys"""
        raise NotImplementedError()

    def generator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        raise NotImplementedError()

    def discriminator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        raise NotImplementedError()

    def store_discr_outputs(self, batch):
        """Attach upsampled discriminator score maps (real/fake/diff) to the
        batch for visualization."""
        out_size = batch['image'].shape[2:]
        discr_real_out, _ = self.discriminator(batch['image'])
        discr_fake_out, _ = self.discriminator(batch['predicted_image'])
        batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest')
        batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest')
        batch['discr_output_diff'] = batch['discr_output_real'] - batch['discr_output_fake']

    def get_ddp_rank(self):
        # None signals single-process training (rank checks then pass).
        return self.trainer.global_rank if (self.trainer.num_nodes * self.trainer.num_processes) > 1 else None
|
RAVE-main/annotator/lama/saicinpainting/training/trainers/default.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from omegaconf import OmegaConf
|
| 6 |
+
|
| 7 |
+
# from annotator.lama.saicinpainting.training.data.datasets import make_constant_area_crop_params
|
| 8 |
+
from annotator.lama.saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter
|
| 9 |
+
from annotator.lama.saicinpainting.training.losses.feature_matching import feature_matching_loss, masked_l1_loss
|
| 10 |
+
# from annotator.lama.saicinpainting.training.modules.fake_fakes import FakeFakesGenerator
|
| 11 |
+
from annotator.lama.saicinpainting.training.trainers.base import BaseInpaintingTrainingModule, make_multiscale_noise
|
| 12 |
+
from annotator.lama.saicinpainting.utils import add_prefix_to_keys, get_ramp
|
| 13 |
+
|
| 14 |
+
LOGGER = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def make_constant_area_crop_batch(batch, **kwargs):
    """Crop ``batch['image']`` and ``batch['mask']`` to a constant-area window.

    Mutates and returns *batch*. Tensors are assumed NCHW (crop indices are
    applied to dims 2 and 3).

    NOTE(review): `make_constant_area_crop_params` is unresolved in this file —
    its import is commented out above — so calling this function raises
    NameError until that import is restored.
    """
    crop_y, crop_x, crop_height, crop_width = make_constant_area_crop_params(img_height=batch['image'].shape[2],
                                                                             img_width=batch['image'].shape[3],
                                                                             **kwargs)
    # Same spatial window for image and mask so they stay aligned.
    batch['image'] = batch['image'][:, :, crop_y : crop_y + crop_height, crop_x : crop_x + crop_width]
    batch['mask'] = batch['mask'][:, :, crop_y: crop_y + crop_height, crop_x: crop_x + crop_width]
    return batch
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class DefaultInpaintingTrainingModule(BaseInpaintingTrainingModule):
    """Default mask-conditioned inpainting trainer.

    Feeds the generator a masked image (optionally with multiscale noise and
    the mask concatenated along channels) and combines L1, VGG-perceptual,
    adversarial, feature-matching and ResNet-perceptual generator losses with
    an adversarial discriminator loss.
    """

    def __init__(self, *args, concat_mask=True, rescale_scheduler_kwargs=None, image_to_discriminator='predicted_image',
                 add_noise_kwargs=None, noise_fill_hole=False, const_area_crop_kwargs=None,
                 distance_weighter_kwargs=None, distance_weighted_mask_for_discr=False,
                 fake_fakes_proba=0, fake_fakes_generator_kwargs=None,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Whether the binary mask is appended as an extra input channel.
        self.concat_mask = concat_mask
        # Optional schedule mapping global_step -> training image size.
        self.rescale_size_getter = get_ramp(**rescale_scheduler_kwargs) if rescale_scheduler_kwargs is not None else None
        # Which batch key the discriminator judges ('predicted_image' or 'inpainted').
        self.image_to_discriminator = image_to_discriminator
        self.add_noise_kwargs = add_noise_kwargs
        self.noise_fill_hole = noise_fill_hole
        self.const_area_crop_kwargs = const_area_crop_kwargs
        # Optional distance-based re-weighting of the mask used by the losses.
        self.refine_mask_for_losses = make_mask_distance_weighter(**distance_weighter_kwargs) \
            if distance_weighter_kwargs is not None else None
        self.distance_weighted_mask_for_discr = distance_weighted_mask_for_discr

        self.fake_fakes_proba = fake_fakes_proba
        if self.fake_fakes_proba > 1e-3:
            # NOTE(review): FakeFakesGenerator's import is commented out above,
            # so enabling fake_fakes_proba > 1e-3 raises NameError here.
            self.fake_fakes_gen = FakeFakesGenerator(**(fake_fakes_generator_kwargs or {}))

    def forward(self, batch):
        """Run the generator on a masked batch; adds prediction keys to *batch*."""
        # Optional curriculum rescaling of the whole batch during training.
        if self.training and self.rescale_size_getter is not None:
            cur_size = self.rescale_size_getter(self.global_step)
            batch['image'] = F.interpolate(batch['image'], size=cur_size, mode='bilinear', align_corners=False)
            # Nearest interpolation keeps the mask binary.
            batch['mask'] = F.interpolate(batch['mask'], size=cur_size, mode='nearest')

        if self.training and self.const_area_crop_kwargs is not None:
            batch = make_constant_area_crop_batch(batch, **self.const_area_crop_kwargs)

        img = batch['image']
        mask = batch['mask']

        # Zero out the hole region (mask==1 marks pixels to be inpainted).
        masked_img = img * (1 - mask)

        if self.add_noise_kwargs is not None:
            noise = make_multiscale_noise(masked_img, **self.add_noise_kwargs)
            if self.noise_fill_hole:
                # Fill the hole with noise instead of zeros.
                masked_img = masked_img + mask * noise[:, :masked_img.shape[1]]
            masked_img = torch.cat([masked_img, noise], dim=1)

        if self.concat_mask:
            masked_img = torch.cat([masked_img, mask], dim=1)

        batch['predicted_image'] = self.generator(masked_img)
        # Composite: prediction inside the hole, original pixels outside.
        batch['inpainted'] = mask * batch['predicted_image'] + (1 - mask) * batch['image']

        if self.fake_fakes_proba > 1e-3:
            if self.training and torch.rand(1).item() < self.fake_fakes_proba:
                batch['fake_fakes'], batch['fake_fakes_masks'] = self.fake_fakes_gen(img, mask)
                batch['use_fake_fakes'] = True
            else:
                # Placeholders keep the batch schema stable across steps.
                batch['fake_fakes'] = torch.zeros_like(img)
                batch['fake_fakes_masks'] = torch.zeros_like(mask)
                batch['use_fake_fakes'] = False

        # Distance-weighted mask only applies during training; eval uses the raw mask.
        batch['mask_for_losses'] = self.refine_mask_for_losses(img, batch['predicted_image'], mask) \
            if self.refine_mask_for_losses is not None and self.training \
            else mask

        return batch

    def generator_loss(self, batch):
        """Compute the total generator loss and a dict of per-term metrics."""
        img = batch['image']
        predicted_img = batch[self.image_to_discriminator]
        original_mask = batch['mask']
        supervised_mask = batch['mask_for_losses']

        # L1
        l1_value = masked_l1_loss(predicted_img, img, supervised_mask,
                                  self.config.losses.l1.weight_known,
                                  self.config.losses.l1.weight_missing)

        total_loss = l1_value
        metrics = dict(gen_l1=l1_value)

        # vgg-based perceptual loss
        if self.config.losses.perceptual.weight > 0:
            pl_value = self.loss_pl(predicted_img, img, mask=supervised_mask).sum() * self.config.losses.perceptual.weight
            total_loss = total_loss + pl_value
            metrics['gen_pl'] = pl_value

        # discriminator
        # adversarial_loss calls backward by itself
        mask_for_discr = supervised_mask if self.distance_weighted_mask_for_discr else original_mask
        self.adversarial_loss.pre_generator_step(real_batch=img, fake_batch=predicted_img,
                                                 generator=self.generator, discriminator=self.discriminator)
        discr_real_pred, discr_real_features = self.discriminator(img)
        discr_fake_pred, discr_fake_features = self.discriminator(predicted_img)
        adv_gen_loss, adv_metrics = self.adversarial_loss.generator_loss(real_batch=img,
                                                                         fake_batch=predicted_img,
                                                                         discr_real_pred=discr_real_pred,
                                                                         discr_fake_pred=discr_fake_pred,
                                                                         mask=mask_for_discr)
        total_loss = total_loss + adv_gen_loss
        metrics['gen_adv'] = adv_gen_loss
        metrics.update(add_prefix_to_keys(adv_metrics, 'adv_'))

        # feature matching
        if self.config.losses.feature_matching.weight > 0:
            need_mask_in_fm = OmegaConf.to_container(self.config.losses.feature_matching).get('pass_mask', False)
            mask_for_fm = supervised_mask if need_mask_in_fm else None
            fm_value = feature_matching_loss(discr_fake_features, discr_real_features,
                                             mask=mask_for_fm) * self.config.losses.feature_matching.weight
            total_loss = total_loss + fm_value
            metrics['gen_fm'] = fm_value

        if self.loss_resnet_pl is not None:
            resnet_pl_value = self.loss_resnet_pl(predicted_img, img)
            total_loss = total_loss + resnet_pl_value
            metrics['gen_resnet_pl'] = resnet_pl_value

        return total_loss, metrics

    def discriminator_loss(self, batch):
        """Compute the discriminator's adversarial loss (real vs detached fake)."""
        total_loss = 0
        metrics = {}

        # Detach so discriminator updates do not backprop into the generator.
        predicted_img = batch[self.image_to_discriminator].detach()
        self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=predicted_img,
                                                     generator=self.generator, discriminator=self.discriminator)
        discr_real_pred, discr_real_features = self.discriminator(batch['image'])
        discr_fake_pred, discr_fake_features = self.discriminator(predicted_img)
        adv_discr_loss, adv_metrics = self.adversarial_loss.discriminator_loss(real_batch=batch['image'],
                                                                               fake_batch=predicted_img,
                                                                               discr_real_pred=discr_real_pred,
                                                                               discr_fake_pred=discr_fake_pred,
                                                                               mask=batch['mask'])
        total_loss = total_loss + adv_discr_loss
        metrics['discr_adv'] = adv_discr_loss
        metrics.update(add_prefix_to_keys(adv_metrics, 'adv_'))


        # Extra discriminator term on synthetic "fake fakes" when forward()
        # produced them this step.
        if batch.get('use_fake_fakes', False):
            fake_fakes = batch['fake_fakes']
            self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=fake_fakes,
                                                         generator=self.generator, discriminator=self.discriminator)
            discr_fake_fakes_pred, _ = self.discriminator(fake_fakes)
            fake_fakes_adv_discr_loss, fake_fakes_adv_metrics = self.adversarial_loss.discriminator_loss(
                real_batch=batch['image'],
                fake_batch=fake_fakes,
                discr_real_pred=discr_real_pred,
                discr_fake_pred=discr_fake_fakes_pred,
                mask=batch['mask']
            )
            total_loss = total_loss + fake_fakes_adv_discr_loss
            metrics['discr_adv_fake_fakes'] = fake_fakes_adv_discr_loss
            metrics.update(add_prefix_to_keys(fake_fakes_adv_metrics, 'adv_'))

        return total_loss, metrics
|
RAVE-main/annotator/lama/saicinpainting/training/visualizers/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
from annotator.lama.saicinpainting.training.visualizers.directory import DirectoryVisualizer
|
| 4 |
+
from annotator.lama.saicinpainting.training.visualizers.noop import NoopVisualizer
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def make_visualizer(kind, **kwargs):
    """Factory for training visualizers.

    Args:
        kind: 'directory' (writes visualization images to disk) or
            'noop' (does nothing).
        **kwargs: forwarded to the DirectoryVisualizer constructor;
            ignored for 'noop'.

    Raises:
        ValueError: if *kind* is not recognized.
    """
    logging.info(f'Make visualizer {kind}')

    if kind == 'noop':
        return NoopVisualizer()
    if kind == 'directory':
        return DirectoryVisualizer(**kwargs)
    raise ValueError(f'Unknown visualizer kind {kind}')
|
RAVE-main/annotator/lama/saicinpainting/training/visualizers/base.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
from typing import Dict, List
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from skimage import color
|
| 7 |
+
from skimage.segmentation import mark_boundaries
|
| 8 |
+
|
| 9 |
+
from . import colors
|
| 10 |
+
|
| 11 |
+
COLORS, _ = colors.generate_colors(151) # 151 - max classes for semantic segmentation
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class BaseVisualizer:
    """Interface for training visualizers.

    Concrete subclasses turn a training batch into an image and persist or
    display it.
    """

    @abc.abstractmethod
    def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
        """Render *batch* as a visualization for the given epoch/batch indices.

        Must be overridden by subclasses.
        """
        raise NotImplementedError()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def visualize_mask_and_images(images_dict: Dict[str, np.ndarray], keys: List[str],
                              last_without_mask=True, rescale_keys=None, mask_only_first=None,
                              black_mask=False) -> np.ndarray:
    """Render one sample: selected images side by side with mask boundaries drawn.

    Each entry of *keys* is taken from *images_dict* (CHW layout), converted to
    an HWC RGB image, optionally overlaid with the mask boundary, and the
    results are concatenated horizontally.
    """
    # Binarize the mask; mask[0] below takes its single channel.
    mask = images_dict['mask'] > 0.5
    result = []
    for i, k in enumerate(keys):
        img = images_dict[k]
        # CHW -> HWC for display.
        img = np.transpose(img, (1, 2, 0))

        if rescale_keys is not None and k in rescale_keys:
            # Min-max normalize to [0, ~1]; the subtraction makes a copy, so
            # the in-place division does not touch images_dict.
            img = img - img.min()
            img /= img.max() + 1e-5
        if len(img.shape) == 2:
            img = np.expand_dims(img, 2)

        if img.shape[2] == 1:
            # Grayscale -> RGB.
            img = np.repeat(img, 3, axis=2)
        elif (img.shape[2] > 3):
            # More than 3 channels: treat as per-class scores and colorize.
            img_classes = img.argmax(2)
            img = color.label2rgb(img_classes, colors=COLORS)

        if mask_only_first:
            need_mark_boundaries = i == 0
        else:
            # By default, every column except (optionally) the last gets boundaries.
            need_mark_boundaries = i < len(keys) - 1 or not last_without_mask

        if need_mark_boundaries:
            if black_mask:
                # Black out the hole region before drawing the boundary.
                img = img * (1 - mask[0][..., None])
            img = mark_boundaries(img,
                                  mask[0],
                                  color=(1., 0., 0.),
                                  outline_color=(1., 1., 1.),
                                  mode='thick')
        result.append(img)
    # Concatenate columns horizontally into one row.
    return np.concatenate(result, axis=1)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def visualize_mask_and_images_batch(batch: Dict[str, torch.Tensor], keys: List[str], max_items=10,
                                    last_without_mask=True, rescale_keys=None) -> np.ndarray:
    """Visualize up to *max_items* samples of a batch, one row per sample.

    Only the tensors named in *keys* (plus 'mask') are moved to numpy; each
    sample is rendered by :func:`visualize_mask_and_images` and the rows are
    stacked vertically.
    """
    batch = {name: tensor.detach().cpu().numpy() for name, tensor in batch.items()
             if name in keys or name == 'mask'}

    batch_size = next(iter(batch.values())).shape[0]
    n_rows = min(batch_size, max_items)
    rows = [
        visualize_mask_and_images({name: arr[idx] for name, arr in batch.items()},
                                  keys,
                                  last_without_mask=last_without_mask,
                                  rescale_keys=rescale_keys)
        for idx in range(n_rows)
    ]
    return np.concatenate(rows, axis=0)
|
RAVE-main/annotator/lama/saicinpainting/training/visualizers/colors.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import colorsys
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported, so this
# module also works in headless (no display) environments.
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False):
    # https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib
    """Create a random categorical colormap, e.g. for segmentation overlays.

    :param nlabels: Number of labels (size of colormap)
    :param type: 'bright' for strong colors, 'soft' for pastel colors
    :param first_color_black: Option to use first color as black, True or False
    :param last_color_black: Option to use last color as black, True or False
    :param verbose: Prints the number of labels and shows the colormap. True or False
    :return: (list of RGB tuples, matplotlib colormap), or None for a bad *type*
    """
    if type not in ('bright', 'soft'):
        print ('Please choose "bright" or "soft" for type')
        return

    if verbose:
        print('Number of labels: ' + str(nlabels))

    if type == 'bright':
        # Strong colors: random hue, high saturation and value, HSV -> RGB.
        randRGBcolors = [
            colorsys.hsv_to_rgb(np.random.uniform(low=0.0, high=1),
                                np.random.uniform(low=0.2, high=1),
                                np.random.uniform(low=0.9, high=1))
            for _ in range(nlabels)
        ]
    else:
        # Pastel colors: every RGB channel drawn from a light band.
        low, high = 0.6, 0.95
        randRGBcolors = [(np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high),
                          np.random.uniform(low=low, high=high))
                         for _ in range(nlabels)]

    # Optionally force black endpoints (useful for background / ignore labels).
    if first_color_black:
        randRGBcolors[0] = [0, 0, 0]
    if last_color_black:
        randRGBcolors[-1] = [0, 0, 0]

    random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)

    # Display a horizontal colorbar of the generated map when requested.
    if verbose:
        from matplotlib import colors, colorbar
        from matplotlib import pyplot as plt
        fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))

        bounds = np.linspace(0, nlabels, nlabels + 1)
        norm = colors.BoundaryNorm(bounds, nlabels)

        cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None,
                                   boundaries=bounds, format='%1i', orientation=u'horizontal')

    return randRGBcolors, random_colormap
|
| 76 |
+
|
RAVE-main/annotator/lama/saicinpainting/training/visualizers/directory.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from annotator.lama.saicinpainting.training.visualizers.base import BaseVisualizer, visualize_mask_and_images_batch
|
| 7 |
+
from annotator.lama.saicinpainting.utils import check_and_warn_input_range
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class DirectoryVisualizer(BaseVisualizer):
    """Visualizer that writes per-batch JPEG collages under an output directory."""

    # Columns shown left-to-right in each saved visualization.
    DEFAULT_KEY_ORDER = 'image predicted_image inpainted'.split(' ')

    def __init__(self, outdir, key_order=DEFAULT_KEY_ORDER, max_items_in_batch=10,
                 last_without_mask=True, rescale_keys=None):
        # NOTE(review): key_order defaults to a shared mutable list; it is only
        # read here, but callers should not mutate it.
        self.outdir = outdir
        os.makedirs(self.outdir, exist_ok=True)
        self.key_order = key_order
        self.max_items_in_batch = max_items_in_batch
        self.last_without_mask = last_without_mask
        self.rescale_keys = rescale_keys

    def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
        # Warn (don't fail) if images are not in [0, 1] as expected below.
        check_and_warn_input_range(batch['image'], 0, 1, 'DirectoryVisualizer target image')
        vis_img = visualize_mask_and_images_batch(batch, self.key_order, max_items=self.max_items_in_batch,
                                                  last_without_mask=self.last_without_mask,
                                                  rescale_keys=self.rescale_keys)

        # [0, 1] floats -> uint8 pixels.
        vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8')

        # One subdirectory per epoch; rank suffix keeps DDP workers from
        # overwriting each other's files.
        curoutdir = os.path.join(self.outdir, f'epoch{epoch_i:04d}{suffix}')
        os.makedirs(curoutdir, exist_ok=True)
        rank_suffix = f'_r{rank}' if rank is not None else ''
        out_fname = os.path.join(curoutdir, f'batch{batch_i:07d}{rank_suffix}.jpg')

        # OpenCV expects BGR channel order.
        vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR)
        cv2.imwrite(out_fname, vis_img)
|
RAVE-main/annotator/lama/saicinpainting/training/visualizers/noop.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from annotator.lama.saicinpainting.training.visualizers.base import BaseVisualizer
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class NoopVisualizer(BaseVisualizer):
    """Visualizer stub that accepts any configuration and does nothing.

    Lets calling code disable visualization without special-casing.
    """

    def __init__(self, *args, **kwargs):
        # Accept and discard whatever the factory passes in.
        pass

    def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None):
        # Intentionally a no-op: nothing is rendered or written.
        return None
|
RAVE-main/annotator/mmpkg/mmseg/datasets/ade.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .builder import DATASETS
|
| 2 |
+
from .custom import CustomDataset
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
@DATASETS.register_module()
class ADE20KDataset(CustomDataset):
    """ADE20K dataset.

    In segmentation map annotation for ADE20K, 0 stands for background, which
    is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    # The 150 ADE20K category names (index i corresponds to PALETTE[i]).
    CLASSES = (
        'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
        'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
        'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
        'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
        'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
        'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
        'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
        'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
        'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
        'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
        'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
        'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
        'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
        'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
        'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
        'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
        'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
        'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
        'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
        'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
        'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
        'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
        'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
        'clock', 'flag')

    # RGB display color for each class, parallel to CLASSES.
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
               [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
               [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
               [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
               [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
               [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
               [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
               [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
               [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
               [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
               [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
               [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
               [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
               [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
               [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
               [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
               [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
               [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
               [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
               [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
               [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
               [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
               [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
               [102, 255, 0], [92, 0, 255]]

    def __init__(self, **kwargs):
        # Fixed suffixes / reduce_zero_label per the ADE20K release layout.
        super(ADE20KDataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            reduce_zero_label=True,
            **kwargs)
|
RAVE-main/annotator/mmpkg/mmseg/datasets/builder.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import platform
|
| 3 |
+
import random
|
| 4 |
+
from functools import partial
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from annotator.mmpkg.mmcv.parallel import collate
|
| 8 |
+
from annotator.mmpkg.mmcv.runner import get_dist_info
|
| 9 |
+
from annotator.mmpkg.mmcv.utils import Registry, build_from_cfg
|
| 10 |
+
from annotator.mmpkg.mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
|
| 11 |
+
from torch.utils.data import DistributedSampler
|
| 12 |
+
|
| 13 |
+
# On POSIX systems, raise the open-file soft limit: dataloader workers can
# exhaust the default (often 1024) file descriptors.
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    # Never exceed the hard limit; cap at 4096.
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))

# Registries that dataset / pipeline classes register into via decorators.
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _concat_dataset(cfg, default_args=None):
    """Expand a config whose ``img_dir``/``ann_dir``/``split`` are lists into
    one dataset per entry and wrap them all in a :obj:`ConcatDataset`."""
    from .dataset_wrappers import ConcatDataset
    img_dir = cfg['img_dir']
    ann_dir = cfg.get('ann_dir', None)
    split = cfg.get('split', None)

    def _count(value):
        # Number of entries a field contributes: len() for list/tuple, else 1.
        return len(value) if isinstance(value, (list, tuple)) else 1

    num_img_dir = _count(img_dir)
    num_ann_dir = _count(ann_dir) if ann_dir is not None else 0
    num_split = _count(split) if split is not None else 0

    # List-valued fields must agree in length (or be absent/scalar).
    if num_img_dir > 1:
        assert num_img_dir == num_ann_dir or num_ann_dir == 0
        assert num_img_dir == num_split or num_split == 0
    else:
        assert num_split == num_ann_dir or num_ann_dir <= 1
    num_dset = max(num_split, num_img_dir)

    datasets = []
    for idx in range(num_dset):
        # Start from a deep copy so per-entry overrides don't leak between datasets.
        per_cfg = copy.deepcopy(cfg)
        for field, value in (('img_dir', img_dir), ('ann_dir', ann_dir), ('split', split)):
            if isinstance(value, (list, tuple)):
                per_cfg[field] = value[idx]
        datasets.append(build_dataset(per_cfg, default_args))

    return ConcatDataset(datasets)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def build_dataset(cfg, default_args=None):
    """Recursively build a dataset (or dataset wrapper) from config."""
    from .dataset_wrappers import ConcatDataset, RepeatDataset

    if isinstance(cfg, (list, tuple)):
        # A sequence of configs: build each and concatenate.
        return ConcatDataset([build_dataset(sub_cfg, default_args) for sub_cfg in cfg])
    if cfg['type'] == 'RepeatDataset':
        # Wrapper repeating the inner dataset cfg['times'] times.
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
            cfg.get('split', None), (list, tuple)):
        # One config with several image dirs / splits: expand then concatenate.
        return _concat_dataset(cfg, default_args)
    # Plain dataset config: instantiate through the registry.
    return build_from_cfg(cfg, DATASETS, default_args)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     dataloader_type='PoolDataLoader',
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed to be used. Default: None.
        drop_last (bool): Whether to drop the last incomplete batch in epoch.
            Default: False
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True
        dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # Shuffling is delegated to the sampler, so the DataLoader itself
        # must not shuffle (the two are mutually exclusive).
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        # Single process feeds all GPUs: scale batch size and workers.
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    # Deterministic per-worker seeding only when an explicit seed is given.
    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    # NOTE(review): this guard disappears under `python -O`; the if/elif below
    # would then leave `dataloader` unbound for an unknown type.
    assert dataloader_type in (
        'DataLoader',
        'PoolDataLoader'), f'unsupported dataloader {dataloader_type}'

    if dataloader_type == 'PoolDataLoader':
        dataloader = PoolDataLoader
    elif dataloader_type == 'DataLoader':
        dataloader = DataLoader

    data_loader = dataloader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)

    return data_loader
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Worker init func for dataloader.

    The seed of each worker equals to num_worker * rank + worker_id + user_seed

    Args:
        worker_id (int): Worker id.
        num_workers (int): Number of workers.
        rank (int): The rank of current process.
        seed (int): The random seed to use.
    """
    # Each (rank, worker) pair gets a distinct, reproducible seed so that
    # augmentation randomness differs across workers but is replayable.
    per_worker_seed = seed + worker_id + rank * num_workers
    np.random.seed(per_worker_seed)
    random.seed(per_worker_seed)
|
RAVE-main/annotator/mmpkg/mmseg/datasets/chase_db1.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class ChaseDB1Dataset(CustomDataset):
    """Chase_db1 dataset.

    In segmentation map annotation for Chase_db1, 0 stands for background,
    which is included in 2 categories. ``reduce_zero_label`` is fixed to False.
    The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_1stHO.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        # Dataset-specific settings are pinned here; passing any of these
        # keys in ``kwargs`` raises a duplicate-keyword TypeError.
        fixed_cfg = dict(
            img_suffix='.png',
            seg_map_suffix='_1stHO.png',
            reduce_zero_label=False)
        super().__init__(**fixed_cfg, **kwargs)
        # The base constructor must have resolved an existing image directory.
        assert osp.exists(self.img_dir)
|
RAVE-main/annotator/mmpkg/mmseg/datasets/hrf.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class HRFDataset(CustomDataset):
    """HRF dataset.

    In segmentation map annotation for HRF, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        # Dataset-specific settings are pinned here; passing any of these
        # keys in ``kwargs`` raises a duplicate-keyword TypeError.
        fixed_cfg = dict(
            img_suffix='.png',
            seg_map_suffix='.png',
            reduce_zero_label=False)
        super().__init__(**fixed_cfg, **kwargs)
        # The base constructor must have resolved an existing image directory.
        assert osp.exists(self.img_dir)
|
RAVE-main/annotator/mmpkg/mmseg/models/backbones/hrnet.py
ADDED
|
@@ -0,0 +1,555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
from annotator.mmpkg.mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
|
| 3 |
+
kaiming_init)
|
| 4 |
+
from annotator.mmpkg.mmcv.runner import load_checkpoint
|
| 5 |
+
from annotator.mmpkg.mmcv.utils.parrots_wrapper import _BatchNorm
|
| 6 |
+
|
| 7 |
+
from annotator.mmpkg.mmseg.ops import Upsample, resize
|
| 8 |
+
from annotator.mmpkg.mmseg.utils import get_root_logger
|
| 9 |
+
from ..builder import BACKBONES
|
| 10 |
+
from .resnet import BasicBlock, Bottleneck
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class HRModule(nn.Module):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
    is in this module.
    """

    def __init__(self,
                 num_branches,
                 blocks,
                 num_blocks,
                 in_channels,
                 num_channels,
                 multiscale_output=True,
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True)):
        super(HRModule, self).__init__()
        # Fail fast if the per-branch configuration lists disagree in length.
        self._check_branches(num_branches, num_blocks, in_channels,
                             num_channels)

        self.in_channels = in_channels
        self.num_branches = num_branches

        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks,
                                            num_channels)
        self.fuse_layers = self._make_fuse_layers()
        # NOTE(review): inplace=False here (unlike the stem ReLU in HRNet) —
        # presumably to avoid modifying fused tensors in place; confirm.
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels,
                        num_channels):
        """Check branches configuration."""
        # Each per-branch list must have exactly one entry per branch.
        if num_branches != len(num_blocks):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS(' \
                        f'{len(num_blocks)})'
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS(' \
                        f'{len(num_channels)})'
            raise ValueError(error_msg)

        if num_branches != len(in_channels):
            error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS(' \
                        f'{len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self,
                         branch_index,
                         block,
                         num_blocks,
                         num_channels,
                         stride=1):
        """Build one branch."""
        # A 1x1 conv projection is needed when the residual shortcut would
        # not match the block output in stride or channel count.
        downsample = None
        if stride != 1 or \
                self.in_channels[branch_index] != \
                num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    self.in_channels[branch_index],
                    num_channels[branch_index] * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, num_channels[branch_index] *
                                 block.expansion)[1])

        layers = []
        layers.append(
            block(
                self.in_channels[branch_index],
                num_channels[branch_index],
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg))
        # Record the branch's expanded width (mutates the shared list) so the
        # remaining blocks and the fuse layers use the post-expansion count.
        self.in_channels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.in_channels[branch_index],
                    num_channels[branch_index],
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build multiple branch."""
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build fuse layer."""
        # With a single branch there is nothing to exchange.
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        # If multiscale output is disabled, only the highest-resolution
        # branch (index 0) receives fused features.
        num_out_branches = num_branches if self.multiscale_output else 1
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower-resolution source: 1x1 channel projection, then
                    # bilinear upsample by 2**(j - i) to the target scale.
                    fuse_layer.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels[j],
                                in_channels[i],
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias=False),
                            build_norm_layer(self.norm_cfg, in_channels[i])[1],
                            # we set align_corners=False for HRNet
                            Upsample(
                                scale_factor=2**(j - i),
                                mode='bilinear',
                                align_corners=False)))
                elif j == i:
                    # Identity path; handled as a plain sum in forward().
                    fuse_layer.append(None)
                else:
                    # Higher-resolution source: chain (i - j) stride-2 3x3
                    # convs; only the last one changes the channel count.
                    conv_downsamples = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[i],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[i])[1]))
                        else:
                            conv_downsamples.append(
                                nn.Sequential(
                                    build_conv_layer(
                                        self.conv_cfg,
                                        in_channels[j],
                                        in_channels[j],
                                        kernel_size=3,
                                        stride=2,
                                        padding=1,
                                        bias=False),
                                    build_norm_layer(self.norm_cfg,
                                                     in_channels[j])[1],
                                    nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Forward function."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        # Run each branch on its own resolution stream.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []
        for i in range(len(self.fuse_layers)):
            # Sum contributions from every branch into output branch i.
            y = 0
            for j in range(self.num_branches):
                if i == j:
                    y += x[j]
                elif j > i:
                    # Upsampled features may be off by a pixel at odd sizes;
                    # resize to the exact target spatial shape before adding.
                    y = y + resize(
                        self.fuse_layers[i][j](x[j]),
                        size=x[i].shape[2:],
                        mode='bilinear',
                        align_corners=False)
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
@BACKBONES.register_module()
class HRNet(nn.Module):
    """HRNet backbone.

    High-Resolution Representations for Labeling Pixels and Regions
    arXiv: https://arxiv.org/abs/1904.04514

    Args:
        extra (dict): detailed configuration for each stage of HRNet.
        in_channels (int): Number of input image channels. Normally 3.
        conv_cfg (dict): dictionary to construct and config conv layer.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from annotator.mmpkg.mmseg.models import HRNet
        >>> import torch
        >>> extra = dict(
        >>>     stage1=dict(
        >>>         num_modules=1,
        >>>         num_branches=1,
        >>>         block='BOTTLENECK',
        >>>         num_blocks=(4, ),
        >>>         num_channels=(64, )),
        >>>     stage2=dict(
        >>>         num_modules=1,
        >>>         num_branches=2,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4),
        >>>         num_channels=(32, 64)),
        >>>     stage3=dict(
        >>>         num_modules=4,
        >>>         num_branches=3,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4),
        >>>         num_channels=(32, 64, 128)),
        >>>     stage4=dict(
        >>>         num_modules=3,
        >>>         num_branches=4,
        >>>         block='BASIC',
        >>>         num_blocks=(4, 4, 4, 4),
        >>>         num_channels=(32, 64, 128, 256)))
        >>> self = HRNet(extra, in_channels=1)
        >>> self.eval()
        >>> inputs = torch.rand(1, 1, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 32, 8, 8)
        (1, 64, 4, 4)
        (1, 128, 2, 2)
        (1, 256, 1, 1)
    """

    # Maps the ``block`` string in stage configs to the residual block class.
    blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}

    def __init__(self,
                 extra,
                 in_channels=3,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=False,
                 with_cp=False,
                 zero_init_residual=False):
        super(HRNet, self).__init__()
        self.extra = extra
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        self.zero_init_residual = zero_init_residual

        # stem net
        # Norm layers are registered under generated names (norm1/norm2 with
        # the configured postfix) and exposed via the properties below.
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)

        # Two stride-2 3x3 convs: the stem reduces spatial size by 4x.
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            self.conv_cfg,
            64,
            64,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)

        # stage 1
        self.stage1_cfg = self.extra['stage1']
        num_channels = self.stage1_cfg['num_channels'][0]
        block_type = self.stage1_cfg['block']
        num_blocks = self.stage1_cfg['num_blocks'][0]

        block = self.blocks_dict[block_type]
        # Bottleneck expands channels by block.expansion; stage 2's
        # transition must start from the expanded width.
        stage1_out_channels = num_channels * block.expansion
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)

        # stage 2
        self.stage2_cfg = self.extra['stage2']
        num_channels = self.stage2_cfg['num_channels']
        block_type = self.stage2_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition1 = self._make_transition_layer([stage1_out_channels],
                                                       num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # stage 3
        self.stage3_cfg = self.extra['stage3']
        num_channels = self.stage3_cfg['num_channels']
        block_type = self.stage3_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition2 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # stage 4
        self.stage4_cfg = self.extra['stage4']
        num_channels = self.stage4_cfg['num_channels']
        block_type = self.stage4_cfg['block']

        block = self.blocks_dict[block_type]
        num_channels = [channel * block.expansion for channel in num_channels]
        self.transition3 = self._make_transition_layer(pre_stage_channels,
                                                       num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: the normalization layer named "norm2" """
        return getattr(self, self.norm2_name)

    def _make_transition_layer(self, num_channels_pre_layer,
                               num_channels_cur_layer):
        """Make transition layer."""
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                # Existing branch: add a 3x3 conv only if its channel count
                # changes between stages; otherwise pass through (None).
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                kernel_size=3,
                                stride=1,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg,
                                             num_channels_cur_layer[i])[1],
                            nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(None)
            else:
                # New (lower-resolution) branch: derive it from the last
                # previous branch via stride-2 convs; only the final conv
                # switches to the target channel count.
                conv_downsamples = []
                for j in range(i + 1 - num_branches_pre):
                    in_channels = num_channels_pre_layer[-1]
                    out_channels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else in_channels
                    conv_downsamples.append(
                        nn.Sequential(
                            build_conv_layer(
                                self.conv_cfg,
                                in_channels,
                                out_channels,
                                kernel_size=3,
                                stride=2,
                                padding=1,
                                bias=False),
                            build_norm_layer(self.norm_cfg, out_channels)[1],
                            nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv_downsamples))

        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Make each layer."""
        # 1x1 projection shortcut when the residual path changes shape.
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                build_conv_layer(
                    self.conv_cfg,
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])

        layers = []
        layers.append(
            block(
                inplanes,
                planes,
                stride,
                downsample=downsample,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(
                block(
                    inplanes,
                    planes,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg))

        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, in_channels, multiscale_output=True):
        """Make each stage."""
        num_modules = layer_config['num_modules']
        num_branches = layer_config['num_branches']
        num_blocks = layer_config['num_blocks']
        num_channels = layer_config['num_channels']
        block = self.blocks_dict[layer_config['block']]

        hr_modules = []
        for i in range(num_modules):
            # multi_scale_output is only used for the last module
            if not multiscale_output and i == num_modules - 1:
                reset_multiscale_output = False
            else:
                reset_multiscale_output = True

            hr_modules.append(
                HRModule(
                    num_branches,
                    block,
                    num_blocks,
                    in_channels,
                    num_channels,
                    reset_multiscale_output,
                    with_cp=self.with_cp,
                    norm_cfg=self.norm_cfg,
                    conv_cfg=self.conv_cfg))

        # HRModule mutates ``in_channels`` in place; the returned list holds
        # the stage's actual output widths for the next transition layer.
        return nn.Sequential(*hr_modules), in_channels

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)

            if self.zero_init_residual:
                # Zero the last norm in each residual block so it starts as
                # an identity mapping.
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward function."""

        # Stem: 4x spatial reduction, then the stage-1 residual layer.
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.relu(x)
        x = self.layer1(x)

        # Fan stage-1 output into stage-2 branches (None => pass-through).
        x_list = []
        for i in range(self.stage2_cfg['num_branches']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        # New branches are derived from the last (lowest-res) output.
        x_list = []
        for i in range(self.stage3_cfg['num_branches']):
            if self.transition2[i] is not None:
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['num_branches']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)

        # One feature map per branch, highest resolution first.
        return y_list

    def train(self, mode=True):
        """Convert the model into training mode will keeping the normalization
        layer freezed."""
        super(HRNet, self).train(mode)
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()
|
RAVE-main/annotator/mmpkg/mmseg/models/necks/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .fpn import FPN
|
| 2 |
+
from .multilevel_neck import MultiLevelNeck
|
| 3 |
+
|
| 4 |
+
__all__ = ['FPN', 'MultiLevelNeck']
|
RAVE-main/annotator/mmpkg/mmseg/models/necks/fpn.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule, xavier_init
|
| 4 |
+
|
| 5 |
+
from ..builder import NECKS
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@NECKS.register_module()
|
| 9 |
+
class FPN(nn.Module):
|
| 10 |
+
"""Feature Pyramid Network.
|
| 11 |
+
|
| 12 |
+
This is an implementation of - Feature Pyramid Networks for Object
|
| 13 |
+
Detection (https://arxiv.org/abs/1612.03144)
|
| 14 |
+
|
| 15 |
+
Args:
|
| 16 |
+
in_channels (List[int]): Number of input channels per scale.
|
| 17 |
+
out_channels (int): Number of output channels (used at each scale)
|
| 18 |
+
num_outs (int): Number of output scales.
|
| 19 |
+
start_level (int): Index of the start input backbone level used to
|
| 20 |
+
build the feature pyramid. Default: 0.
|
| 21 |
+
end_level (int): Index of the end input backbone level (exclusive) to
|
| 22 |
+
build the feature pyramid. Default: -1, which means the last level.
|
| 23 |
+
add_extra_convs (bool | str): If bool, it decides whether to add conv
|
| 24 |
+
layers on top of the original feature maps. Default to False.
|
| 25 |
+
If True, its actual mode is specified by `extra_convs_on_inputs`.
|
| 26 |
+
If str, it specifies the source feature map of the extra convs.
|
| 27 |
+
Only the following options are allowed
|
| 28 |
+
|
| 29 |
+
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
|
| 30 |
+
- 'on_lateral': Last feature map after lateral convs.
|
| 31 |
+
- 'on_output': The last output feature map after fpn convs.
|
| 32 |
+
extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
|
| 33 |
+
on the original feature from the backbone. If True,
|
| 34 |
+
it is equivalent to `add_extra_convs='on_input'`. If False, it is
|
| 35 |
+
equivalent to set `add_extra_convs='on_output'`. Default to True.
|
| 36 |
+
relu_before_extra_convs (bool): Whether to apply relu before the extra
|
| 37 |
+
conv. Default: False.
|
| 38 |
+
no_norm_on_lateral (bool): Whether to apply norm on lateral.
|
| 39 |
+
Default: False.
|
| 40 |
+
conv_cfg (dict): Config dict for convolution layer. Default: None.
|
| 41 |
+
norm_cfg (dict): Config dict for normalization layer. Default: None.
|
| 42 |
+
act_cfg (str): Config dict for activation layer in ConvModule.
|
| 43 |
+
Default: None.
|
| 44 |
+
upsample_cfg (dict): Config dict for interpolate layer.
|
| 45 |
+
Default: `dict(mode='nearest')`
|
| 46 |
+
|
| 47 |
+
Example:
|
| 48 |
+
>>> import torch
|
| 49 |
+
>>> in_channels = [2, 3, 5, 7]
|
| 50 |
+
>>> scales = [340, 170, 84, 43]
|
| 51 |
+
>>> inputs = [torch.rand(1, c, s, s)
|
| 52 |
+
... for c, s in zip(in_channels, scales)]
|
| 53 |
+
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
|
| 54 |
+
>>> outputs = self.forward(inputs)
|
| 55 |
+
>>> for i in range(len(outputs)):
|
| 56 |
+
... print(f'outputs[{i}].shape = {outputs[i].shape}')
|
| 57 |
+
outputs[0].shape = torch.Size([1, 11, 340, 340])
|
| 58 |
+
outputs[1].shape = torch.Size([1, 11, 170, 170])
|
| 59 |
+
outputs[2].shape = torch.Size([1, 11, 84, 84])
|
| 60 |
+
outputs[3].shape = torch.Size([1, 11, 43, 43])
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
def __init__(self,
             in_channels,
             out_channels,
             num_outs,
             start_level=0,
             end_level=-1,
             add_extra_convs=False,
             extra_convs_on_inputs=False,
             relu_before_extra_convs=False,
             no_norm_on_lateral=False,
             conv_cfg=None,
             norm_cfg=None,
             act_cfg=None,
             upsample_cfg=dict(mode='nearest')):
    """Build the FPN lateral/output convs and optional extra output levels.

    See the class docstring for the meaning of each argument.
    """
    super(FPN, self).__init__()
    assert isinstance(in_channels, list)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.num_ins = len(in_channels)
    self.num_outs = num_outs
    self.relu_before_extra_convs = relu_before_extra_convs
    self.no_norm_on_lateral = no_norm_on_lateral
    self.fp16_enabled = False
    # Copy so later mutation of the caller's dict cannot change behavior.
    self.upsample_cfg = upsample_cfg.copy()

    if end_level == -1:
        # Use every backbone level from `start_level` upwards.
        self.backbone_end_level = self.num_ins
        assert num_outs >= self.num_ins - start_level
    else:
        # if end_level < inputs, no extra level is allowed
        self.backbone_end_level = end_level
        assert end_level <= len(in_channels)
        assert num_outs == end_level - start_level
    self.start_level = start_level
    self.end_level = end_level
    self.add_extra_convs = add_extra_convs
    assert isinstance(add_extra_convs, (str, bool))
    if isinstance(add_extra_convs, str):
        # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
        assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
    elif add_extra_convs:  # True
        # Normalize the legacy bool form to the string form.
        if extra_convs_on_inputs:
            # For compatibility with previous release
            # TODO: deprecate `extra_convs_on_inputs`
            self.add_extra_convs = 'on_input'
        else:
            self.add_extra_convs = 'on_output'

    self.lateral_convs = nn.ModuleList()
    self.fpn_convs = nn.ModuleList()

    for i in range(self.start_level, self.backbone_end_level):
        # 1x1 conv projecting each backbone level to `out_channels`.
        l_conv = ConvModule(
            in_channels[i],
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
            act_cfg=act_cfg,
            inplace=False)
        # 3x3 conv applied after the top-down merge at each level.
        fpn_conv = ConvModule(
            out_channels,
            out_channels,
            3,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            inplace=False)

        self.lateral_convs.append(l_conv)
        self.fpn_convs.append(fpn_conv)

    # add extra conv layers (e.g., RetinaNet)
    extra_levels = num_outs - self.backbone_end_level + self.start_level
    if self.add_extra_convs and extra_levels >= 1:
        for i in range(extra_levels):
            if i == 0 and self.add_extra_convs == 'on_input':
                # First extra conv may consume the raw backbone feature.
                # NOTE: this rebinds the local `in_channels`, shadowing the
                # constructor argument from here on (intentional upstream).
                in_channels = self.in_channels[self.backbone_end_level - 1]
            else:
                in_channels = out_channels
            # Stride-2 3x3 conv halves the spatial size per extra level.
            extra_fpn_conv = ConvModule(
                in_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                inplace=False)
            self.fpn_convs.append(extra_fpn_conv)
|
| 155 |
+
|
| 156 |
+
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
    """Apply Xavier-uniform initialisation to every Conv2d in the module."""
    for module in self.modules():
        if isinstance(module, nn.Conv2d):
            xavier_init(module, distribution='uniform')
|
| 161 |
+
|
| 162 |
+
def forward(self, inputs):
|
| 163 |
+
assert len(inputs) == len(self.in_channels)
|
| 164 |
+
|
| 165 |
+
# build laterals
|
| 166 |
+
laterals = [
|
| 167 |
+
lateral_conv(inputs[i + self.start_level])
|
| 168 |
+
for i, lateral_conv in enumerate(self.lateral_convs)
|
| 169 |
+
]
|
| 170 |
+
|
| 171 |
+
# build top-down path
|
| 172 |
+
used_backbone_levels = len(laterals)
|
| 173 |
+
for i in range(used_backbone_levels - 1, 0, -1):
|
| 174 |
+
# In some cases, fixing `scale factor` (e.g. 2) is preferred, but
|
| 175 |
+
# it cannot co-exist with `size` in `F.interpolate`.
|
| 176 |
+
if 'scale_factor' in self.upsample_cfg:
|
| 177 |
+
laterals[i - 1] += F.interpolate(laterals[i],
|
| 178 |
+
**self.upsample_cfg)
|
| 179 |
+
else:
|
| 180 |
+
prev_shape = laterals[i - 1].shape[2:]
|
| 181 |
+
laterals[i - 1] += F.interpolate(
|
| 182 |
+
laterals[i], size=prev_shape, **self.upsample_cfg)
|
| 183 |
+
|
| 184 |
+
# build outputs
|
| 185 |
+
# part 1: from original levels
|
| 186 |
+
outs = [
|
| 187 |
+
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
|
| 188 |
+
]
|
| 189 |
+
# part 2: add extra levels
|
| 190 |
+
if self.num_outs > len(outs):
|
| 191 |
+
# use max pool to get more levels on top of outputs
|
| 192 |
+
# (e.g., Faster R-CNN, Mask R-CNN)
|
| 193 |
+
if not self.add_extra_convs:
|
| 194 |
+
for i in range(self.num_outs - used_backbone_levels):
|
| 195 |
+
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
|
| 196 |
+
# add conv layers on top of original feature maps (RetinaNet)
|
| 197 |
+
else:
|
| 198 |
+
if self.add_extra_convs == 'on_input':
|
| 199 |
+
extra_source = inputs[self.backbone_end_level - 1]
|
| 200 |
+
elif self.add_extra_convs == 'on_lateral':
|
| 201 |
+
extra_source = laterals[-1]
|
| 202 |
+
elif self.add_extra_convs == 'on_output':
|
| 203 |
+
extra_source = outs[-1]
|
| 204 |
+
else:
|
| 205 |
+
raise NotImplementedError
|
| 206 |
+
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
|
| 207 |
+
for i in range(used_backbone_levels + 1, self.num_outs):
|
| 208 |
+
if self.relu_before_extra_convs:
|
| 209 |
+
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
|
| 210 |
+
else:
|
| 211 |
+
outs.append(self.fpn_convs[i](outs[-1]))
|
| 212 |
+
return tuple(outs)
|
RAVE-main/annotator/mmpkg/mmseg/models/necks/multilevel_neck.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch.nn as nn
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from annotator.mmpkg.mmcv.cnn import ConvModule
|
| 4 |
+
|
| 5 |
+
from ..builder import NECKS
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@NECKS.register_module()
class MultiLevelNeck(nn.Module):
    """MultiLevelNeck.

    A neck structure connect vit backbone and decoder_heads.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        scales (List[int]): Scale factors for each input feature map.
            Default: [0.5, 1, 2, 4].
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        act_cfg (dict): Config dict for activation layer in ConvModule.
            Default: None.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 scales=[0.5, 1, 2, 4],
                 norm_cfg=None,
                 act_cfg=None):
        super(MultiLevelNeck, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scales = scales
        self.num_outs = len(scales)
        self.lateral_convs = nn.ModuleList()
        self.convs = nn.ModuleList()
        # 1x1 convs projecting every input level to `out_channels`.
        for in_channel in in_channels:
            self.lateral_convs.append(
                ConvModule(
                    in_channel,
                    out_channels,
                    kernel_size=1,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # One 3x3 conv per output scale, applied after resizing.
        for _ in range(self.num_outs):
            self.convs.append(
                ConvModule(
                    out_channels,
                    out_channels,
                    kernel_size=3,
                    padding=1,
                    stride=1,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))

    def forward(self, inputs):
        """Project each input, resize to every configured scale, and refine.

        Returns:
            tuple[Tensor]: one tensor per entry in ``self.scales``.
        """
        assert len(inputs) == len(self.in_channels)
        # BUGFIX: removed a leftover debug `print(inputs[0].shape)` that
        # spammed stdout on every forward pass.
        inputs = [
            lateral_conv(inputs[i])
            for i, lateral_conv in enumerate(self.lateral_convs)
        ]
        # for len(inputs) not equal to self.num_outs
        if len(inputs) == 1:
            inputs = [inputs[0] for _ in range(self.num_outs)]
        outs = []
        for i in range(self.num_outs):
            x_resize = F.interpolate(
                inputs[i], scale_factor=self.scales[i], mode='bilinear')
            outs.append(self.convs[i](x_resize))
        return tuple(outs)
|
RAVE-main/annotator/uniformer/LICENSE
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright 2022 SenseTime X-Lab. All rights reserved.
|
| 2 |
+
|
| 3 |
+
Apache License
|
| 4 |
+
Version 2.0, January 2004
|
| 5 |
+
http://www.apache.org/licenses/
|
| 6 |
+
|
| 7 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 8 |
+
|
| 9 |
+
1. Definitions.
|
| 10 |
+
|
| 11 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 12 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 13 |
+
|
| 14 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 15 |
+
the copyright owner that is granting the License.
|
| 16 |
+
|
| 17 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 18 |
+
other entities that control, are controlled by, or are under common
|
| 19 |
+
control with that entity. For the purposes of this definition,
|
| 20 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 21 |
+
direction or management of such entity, whether by contract or
|
| 22 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 23 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 24 |
+
|
| 25 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 26 |
+
exercising permissions granted by this License.
|
| 27 |
+
|
| 28 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 29 |
+
including but not limited to software source code, documentation
|
| 30 |
+
source, and configuration files.
|
| 31 |
+
|
| 32 |
+
"Object" form shall mean any form resulting from mechanical
|
| 33 |
+
transformation or translation of a Source form, including but
|
| 34 |
+
not limited to compiled object code, generated documentation,
|
| 35 |
+
and conversions to other media types.
|
| 36 |
+
|
| 37 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 38 |
+
Object form, made available under the License, as indicated by a
|
| 39 |
+
copyright notice that is included in or attached to the work
|
| 40 |
+
(an example is provided in the Appendix below).
|
| 41 |
+
|
| 42 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 43 |
+
form, that is based on (or derived from) the Work and for which the
|
| 44 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 45 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 46 |
+
of this License, Derivative Works shall not include works that remain
|
| 47 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 48 |
+
the Work and Derivative Works thereof.
|
| 49 |
+
|
| 50 |
+
"Contribution" shall mean any work of authorship, including
|
| 51 |
+
the original version of the Work and any modifications or additions
|
| 52 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 53 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 54 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 55 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 56 |
+
means any form of electronic, verbal, or written communication sent
|
| 57 |
+
to the Licensor or its representatives, including but not limited to
|
| 58 |
+
communication on electronic mailing lists, source code control systems,
|
| 59 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 60 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 61 |
+
excluding communication that is conspicuously marked or otherwise
|
| 62 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 63 |
+
|
| 64 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 65 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 66 |
+
subsequently incorporated within the Work.
|
| 67 |
+
|
| 68 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 69 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 70 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 71 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 72 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 73 |
+
Work and such Derivative Works in Source or Object form.
|
| 74 |
+
|
| 75 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 76 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 77 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 78 |
+
(except as stated in this section) patent license to make, have made,
|
| 79 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 80 |
+
where such license applies only to those patent claims licensable
|
| 81 |
+
by such Contributor that are necessarily infringed by their
|
| 82 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 83 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 84 |
+
institute patent litigation against any entity (including a
|
| 85 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 86 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 87 |
+
or contributory patent infringement, then any patent licenses
|
| 88 |
+
granted to You under this License for that Work shall terminate
|
| 89 |
+
as of the date such litigation is filed.
|
| 90 |
+
|
| 91 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 92 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 93 |
+
modifications, and in Source or Object form, provided that You
|
| 94 |
+
meet the following conditions:
|
| 95 |
+
|
| 96 |
+
(a) You must give any other recipients of the Work or
|
| 97 |
+
Derivative Works a copy of this License; and
|
| 98 |
+
|
| 99 |
+
(b) You must cause any modified files to carry prominent notices
|
| 100 |
+
stating that You changed the files; and
|
| 101 |
+
|
| 102 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 103 |
+
that You distribute, all copyright, patent, trademark, and
|
| 104 |
+
attribution notices from the Source form of the Work,
|
| 105 |
+
excluding those notices that do not pertain to any part of
|
| 106 |
+
the Derivative Works; and
|
| 107 |
+
|
| 108 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 109 |
+
distribution, then any Derivative Works that You distribute must
|
| 110 |
+
include a readable copy of the attribution notices contained
|
| 111 |
+
within such NOTICE file, excluding those notices that do not
|
| 112 |
+
pertain to any part of the Derivative Works, in at least one
|
| 113 |
+
of the following places: within a NOTICE text file distributed
|
| 114 |
+
as part of the Derivative Works; within the Source form or
|
| 115 |
+
documentation, if provided along with the Derivative Works; or,
|
| 116 |
+
within a display generated by the Derivative Works, if and
|
| 117 |
+
wherever such third-party notices normally appear. The contents
|
| 118 |
+
of the NOTICE file are for informational purposes only and
|
| 119 |
+
do not modify the License. You may add Your own attribution
|
| 120 |
+
notices within Derivative Works that You distribute, alongside
|
| 121 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 122 |
+
that such additional attribution notices cannot be construed
|
| 123 |
+
as modifying the License.
|
| 124 |
+
|
| 125 |
+
You may add Your own copyright statement to Your modifications and
|
| 126 |
+
may provide additional or different license terms and conditions
|
| 127 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 128 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 129 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 130 |
+
the conditions stated in this License.
|
| 131 |
+
|
| 132 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 133 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 134 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 135 |
+
this License, without any additional terms or conditions.
|
| 136 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 137 |
+
the terms of any separate license agreement you may have executed
|
| 138 |
+
with Licensor regarding such Contributions.
|
| 139 |
+
|
| 140 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 141 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 142 |
+
except as required for reasonable and customary use in describing the
|
| 143 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 144 |
+
|
| 145 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 146 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 147 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 148 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 149 |
+
implied, including, without limitation, any warranties or conditions
|
| 150 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 151 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 152 |
+
appropriateness of using or redistributing the Work and assume any
|
| 153 |
+
risks associated with Your exercise of permissions under this License.
|
| 154 |
+
|
| 155 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 156 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 157 |
+
unless required by applicable law (such as deliberate and grossly
|
| 158 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 159 |
+
liable to You for damages, including any direct, indirect, special,
|
| 160 |
+
incidental, or consequential damages of any character arising as a
|
| 161 |
+
result of this License or out of the use or inability to use the
|
| 162 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 163 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 164 |
+
other commercial damages or losses), even if such Contributor
|
| 165 |
+
has been advised of the possibility of such damages.
|
| 166 |
+
|
| 167 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 168 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 169 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 170 |
+
or other liability obligations and/or rights consistent with this
|
| 171 |
+
License. However, in accepting such obligations, You may act only
|
| 172 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 173 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 174 |
+
defend, and hold each Contributor harmless for any liability
|
| 175 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 176 |
+
of your accepting any such warranty or additional liability.
|
| 177 |
+
|
| 178 |
+
END OF TERMS AND CONDITIONS
|
| 179 |
+
|
| 180 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 181 |
+
|
| 182 |
+
To apply the Apache License to your work, attach the following
|
| 183 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 184 |
+
replaced with your own identifying information. (Don't include
|
| 185 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 186 |
+
comment syntax for the file format. We also recommend that a
|
| 187 |
+
file or class name and description of purpose be included on the
|
| 188 |
+
same "printed page" as the copyright notice for easier
|
| 189 |
+
identification within third-party archives.
|
| 190 |
+
|
| 191 |
+
Copyright 2022 SenseTime X-Lab.
|
| 192 |
+
|
| 193 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 194 |
+
you may not use this file except in compliance with the License.
|
| 195 |
+
You may obtain a copy of the License at
|
| 196 |
+
|
| 197 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 198 |
+
|
| 199 |
+
Unless required by applicable law or agreed to in writing, software
|
| 200 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 201 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 202 |
+
See the License for the specific language governing permissions and
|
| 203 |
+
limitations under the License.
|
RAVE-main/annotator/uniformer/__init__.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from annotator.annotator_path import models_path
|
| 3 |
+
from modules import devices
|
| 4 |
+
from annotator.uniformer.inference import init_segmentor, inference_segmentor, show_result_pyplot
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
from mmseg.core.evaluation import get_palette
|
| 8 |
+
except ImportError:
|
| 9 |
+
from annotator.mmpkg.mmseg.core.evaluation import get_palette
|
| 10 |
+
|
| 11 |
+
modeldir = os.path.join(models_path, "uniformer")
|
| 12 |
+
checkpoint_file = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/upernet_global_small.pth"
|
| 13 |
+
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "upernet_global_small.py")
|
| 14 |
+
old_modeldir = os.path.dirname(os.path.realpath(__file__))
|
| 15 |
+
model = None
|
| 16 |
+
|
| 17 |
+
def unload_uniformer_model():
    """Move the cached segmentor to CPU to free accelerator memory.

    The model object itself stays loaded; a later call to
    ``apply_uniformer`` re-uses it.
    """
    global model
    if model is None:
        return
    model = model.cpu()
|
| 21 |
+
|
| 22 |
+
def apply_uniformer(img):
    """Run UniFormer semantic segmentation on ``img``.

    Lazily downloads/loads the checkpoint on first use, then renders the
    predicted segmentation as an image via ``show_result_pyplot``.

    Args:
        img: input image in the format expected by ``inference_segmentor``
            (presumably an HxWx3 array — TODO confirm against callers).

    Returns:
        The rendered segmentation map image.
    """
    global model
    if model is None:
        modelpath = os.path.join(modeldir, "upernet_global_small.pth")
        old_modelpath = os.path.join(old_modeldir, "upernet_global_small.pth")
        # Prefer a checkpoint placed next to this module (legacy layout),
        # otherwise fall back to the shared models dir, downloading if absent.
        if os.path.exists(old_modelpath):
            modelpath = old_modelpath
        elif not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(checkpoint_file, model_dir=modeldir)

        model = init_segmentor(config_file, modelpath, device=devices.get_device_for("controlnet"))
        model = model.to(devices.get_device_for("controlnet"))

    if devices.get_device_for("controlnet").type == 'mps':
        # adaptive_avg_pool2d can fail on MPS, workaround with CPU
        import torch.nn.functional

        orig_adaptive_avg_pool2d = torch.nn.functional.adaptive_avg_pool2d

        def cpu_if_exception(input, *args, **kwargs):
            try:
                return orig_adaptive_avg_pool2d(input, *args, **kwargs)
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Catch only real errors.
            except Exception:
                return orig_adaptive_avg_pool2d(input.cpu(), *args, **kwargs).to(input.device)

        try:
            # Temporarily monkey-patch, and always restore afterwards.
            torch.nn.functional.adaptive_avg_pool2d = cpu_if_exception
            result = inference_segmentor(model, img)
        finally:
            torch.nn.functional.adaptive_avg_pool2d = orig_adaptive_avg_pool2d
    else:
        result = inference_segmentor(model, img)

    res_img = show_result_pyplot(model, img, result, get_palette('ade'), opacity=1)
    return res_img
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/ade20k.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
# Standard ImageNet mean/std; to_rgb converts loaded BGR images to RGB.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
# Training: random resize (0.5x-2.0x of 2048x512), crop, flip, photometric aug.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
# Testing: single scale, no flip (multi-scale ratios left commented out).
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Dataloader settings; val and test share the validation split.
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/chase_db1.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
dataset_type = 'ChaseDB1Dataset'
data_root = 'data/CHASE_DB1'
# Standard ImageNet mean/std; to_rgb converts loaded BGR images to RGB.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (960, 999)
crop_size = (128, 128)
# Training: random resize, small-patch crop, flip, photometric aug.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
# Testing: single scale, no flip.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

# Train set is repeated 40000x so iteration-based schedules see enough data
# from this small dataset; val and test share the validation split.
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/cityscapes.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# Standard ImageNet mean/std; to_rgb converts loaded BGR images to RGB.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 1024)
# Training: random resize (0.5x-2.0x of 2048x1024), crop, flip, photometric aug.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
# Testing: single scale, no flip.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Dataloader settings; val and test share the validation split.
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/train',
        ann_dir='gtFine/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='leftImg8bit/val',
        ann_dir='gtFine/val',
        pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_ = './cityscapes.py'
|
| 2 |
+
img_norm_cfg = dict(
|
| 3 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 4 |
+
crop_size = (769, 769)
|
| 5 |
+
train_pipeline = [
|
| 6 |
+
dict(type='LoadImageFromFile'),
|
| 7 |
+
dict(type='LoadAnnotations'),
|
| 8 |
+
dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
|
| 9 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 10 |
+
dict(type='RandomFlip', prob=0.5),
|
| 11 |
+
dict(type='PhotoMetricDistortion'),
|
| 12 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 13 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 14 |
+
dict(type='DefaultFormatBundle'),
|
| 15 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
| 16 |
+
]
|
| 17 |
+
test_pipeline = [
|
| 18 |
+
dict(type='LoadImageFromFile'),
|
| 19 |
+
dict(
|
| 20 |
+
type='MultiScaleFlipAug',
|
| 21 |
+
img_scale=(2049, 1025),
|
| 22 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
| 23 |
+
flip=False,
|
| 24 |
+
transforms=[
|
| 25 |
+
dict(type='Resize', keep_ratio=True),
|
| 26 |
+
dict(type='RandomFlip'),
|
| 27 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 28 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 29 |
+
dict(type='Collect', keys=['img']),
|
| 30 |
+
])
|
| 31 |
+
]
|
| 32 |
+
data = dict(
|
| 33 |
+
train=dict(pipeline=train_pipeline),
|
| 34 |
+
val=dict(pipeline=test_pipeline),
|
| 35 |
+
test=dict(pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/drive.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'DRIVEDataset'
|
| 3 |
+
data_root = 'data/DRIVE'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
img_scale = (584, 565)
|
| 7 |
+
crop_size = (64, 64)
|
| 8 |
+
train_pipeline = [
|
| 9 |
+
dict(type='LoadImageFromFile'),
|
| 10 |
+
dict(type='LoadAnnotations'),
|
| 11 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 12 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 13 |
+
dict(type='RandomFlip', prob=0.5),
|
| 14 |
+
dict(type='PhotoMetricDistortion'),
|
| 15 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 16 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 17 |
+
dict(type='DefaultFormatBundle'),
|
| 18 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
| 19 |
+
]
|
| 20 |
+
test_pipeline = [
|
| 21 |
+
dict(type='LoadImageFromFile'),
|
| 22 |
+
dict(
|
| 23 |
+
type='MultiScaleFlipAug',
|
| 24 |
+
img_scale=img_scale,
|
| 25 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
| 26 |
+
flip=False,
|
| 27 |
+
transforms=[
|
| 28 |
+
dict(type='Resize', keep_ratio=True),
|
| 29 |
+
dict(type='RandomFlip'),
|
| 30 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 31 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 32 |
+
dict(type='Collect', keys=['img'])
|
| 33 |
+
])
|
| 34 |
+
]
|
| 35 |
+
|
| 36 |
+
data = dict(
|
| 37 |
+
samples_per_gpu=4,
|
| 38 |
+
workers_per_gpu=4,
|
| 39 |
+
train=dict(
|
| 40 |
+
type='RepeatDataset',
|
| 41 |
+
times=40000,
|
| 42 |
+
dataset=dict(
|
| 43 |
+
type=dataset_type,
|
| 44 |
+
data_root=data_root,
|
| 45 |
+
img_dir='images/training',
|
| 46 |
+
ann_dir='annotations/training',
|
| 47 |
+
pipeline=train_pipeline)),
|
| 48 |
+
val=dict(
|
| 49 |
+
type=dataset_type,
|
| 50 |
+
data_root=data_root,
|
| 51 |
+
img_dir='images/validation',
|
| 52 |
+
ann_dir='annotations/validation',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='images/validation',
|
| 58 |
+
ann_dir='annotations/validation',
|
| 59 |
+
pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/hrf.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'HRFDataset'
|
| 3 |
+
data_root = 'data/HRF'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
img_scale = (2336, 3504)
|
| 7 |
+
crop_size = (256, 256)
|
| 8 |
+
train_pipeline = [
|
| 9 |
+
dict(type='LoadImageFromFile'),
|
| 10 |
+
dict(type='LoadAnnotations'),
|
| 11 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 12 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 13 |
+
dict(type='RandomFlip', prob=0.5),
|
| 14 |
+
dict(type='PhotoMetricDistortion'),
|
| 15 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 16 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 17 |
+
dict(type='DefaultFormatBundle'),
|
| 18 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
| 19 |
+
]
|
| 20 |
+
test_pipeline = [
|
| 21 |
+
dict(type='LoadImageFromFile'),
|
| 22 |
+
dict(
|
| 23 |
+
type='MultiScaleFlipAug',
|
| 24 |
+
img_scale=img_scale,
|
| 25 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
| 26 |
+
flip=False,
|
| 27 |
+
transforms=[
|
| 28 |
+
dict(type='Resize', keep_ratio=True),
|
| 29 |
+
dict(type='RandomFlip'),
|
| 30 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 31 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 32 |
+
dict(type='Collect', keys=['img'])
|
| 33 |
+
])
|
| 34 |
+
]
|
| 35 |
+
|
| 36 |
+
data = dict(
|
| 37 |
+
samples_per_gpu=4,
|
| 38 |
+
workers_per_gpu=4,
|
| 39 |
+
train=dict(
|
| 40 |
+
type='RepeatDataset',
|
| 41 |
+
times=40000,
|
| 42 |
+
dataset=dict(
|
| 43 |
+
type=dataset_type,
|
| 44 |
+
data_root=data_root,
|
| 45 |
+
img_dir='images/training',
|
| 46 |
+
ann_dir='annotations/training',
|
| 47 |
+
pipeline=train_pipeline)),
|
| 48 |
+
val=dict(
|
| 49 |
+
type=dataset_type,
|
| 50 |
+
data_root=data_root,
|
| 51 |
+
img_dir='images/validation',
|
| 52 |
+
ann_dir='annotations/validation',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='images/validation',
|
| 58 |
+
ann_dir='annotations/validation',
|
| 59 |
+
pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_context.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'PascalContextDataset'
|
| 3 |
+
data_root = 'data/VOCdevkit/VOC2010/'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
|
| 7 |
+
img_scale = (520, 520)
|
| 8 |
+
crop_size = (480, 480)
|
| 9 |
+
|
| 10 |
+
train_pipeline = [
|
| 11 |
+
dict(type='LoadImageFromFile'),
|
| 12 |
+
dict(type='LoadAnnotations'),
|
| 13 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 14 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 15 |
+
dict(type='RandomFlip', prob=0.5),
|
| 16 |
+
dict(type='PhotoMetricDistortion'),
|
| 17 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 18 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 19 |
+
dict(type='DefaultFormatBundle'),
|
| 20 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
| 21 |
+
]
|
| 22 |
+
test_pipeline = [
|
| 23 |
+
dict(type='LoadImageFromFile'),
|
| 24 |
+
dict(
|
| 25 |
+
type='MultiScaleFlipAug',
|
| 26 |
+
img_scale=img_scale,
|
| 27 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
| 28 |
+
flip=False,
|
| 29 |
+
transforms=[
|
| 30 |
+
dict(type='Resize', keep_ratio=True),
|
| 31 |
+
dict(type='RandomFlip'),
|
| 32 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 33 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 34 |
+
dict(type='Collect', keys=['img']),
|
| 35 |
+
])
|
| 36 |
+
]
|
| 37 |
+
data = dict(
|
| 38 |
+
samples_per_gpu=4,
|
| 39 |
+
workers_per_gpu=4,
|
| 40 |
+
train=dict(
|
| 41 |
+
type=dataset_type,
|
| 42 |
+
data_root=data_root,
|
| 43 |
+
img_dir='JPEGImages',
|
| 44 |
+
ann_dir='SegmentationClassContext',
|
| 45 |
+
split='ImageSets/SegmentationContext/train.txt',
|
| 46 |
+
pipeline=train_pipeline),
|
| 47 |
+
val=dict(
|
| 48 |
+
type=dataset_type,
|
| 49 |
+
data_root=data_root,
|
| 50 |
+
img_dir='JPEGImages',
|
| 51 |
+
ann_dir='SegmentationClassContext',
|
| 52 |
+
split='ImageSets/SegmentationContext/val.txt',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='JPEGImages',
|
| 58 |
+
ann_dir='SegmentationClassContext',
|
| 59 |
+
split='ImageSets/SegmentationContext/val.txt',
|
| 60 |
+
pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'PascalContextDataset59'
|
| 3 |
+
data_root = 'data/VOCdevkit/VOC2010/'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
|
| 7 |
+
img_scale = (520, 520)
|
| 8 |
+
crop_size = (480, 480)
|
| 9 |
+
|
| 10 |
+
train_pipeline = [
|
| 11 |
+
dict(type='LoadImageFromFile'),
|
| 12 |
+
dict(type='LoadAnnotations', reduce_zero_label=True),
|
| 13 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 14 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 15 |
+
dict(type='RandomFlip', prob=0.5),
|
| 16 |
+
dict(type='PhotoMetricDistortion'),
|
| 17 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 18 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 19 |
+
dict(type='DefaultFormatBundle'),
|
| 20 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
| 21 |
+
]
|
| 22 |
+
test_pipeline = [
|
| 23 |
+
dict(type='LoadImageFromFile'),
|
| 24 |
+
dict(
|
| 25 |
+
type='MultiScaleFlipAug',
|
| 26 |
+
img_scale=img_scale,
|
| 27 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
| 28 |
+
flip=False,
|
| 29 |
+
transforms=[
|
| 30 |
+
dict(type='Resize', keep_ratio=True),
|
| 31 |
+
dict(type='RandomFlip'),
|
| 32 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 33 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 34 |
+
dict(type='Collect', keys=['img']),
|
| 35 |
+
])
|
| 36 |
+
]
|
| 37 |
+
data = dict(
|
| 38 |
+
samples_per_gpu=4,
|
| 39 |
+
workers_per_gpu=4,
|
| 40 |
+
train=dict(
|
| 41 |
+
type=dataset_type,
|
| 42 |
+
data_root=data_root,
|
| 43 |
+
img_dir='JPEGImages',
|
| 44 |
+
ann_dir='SegmentationClassContext',
|
| 45 |
+
split='ImageSets/SegmentationContext/train.txt',
|
| 46 |
+
pipeline=train_pipeline),
|
| 47 |
+
val=dict(
|
| 48 |
+
type=dataset_type,
|
| 49 |
+
data_root=data_root,
|
| 50 |
+
img_dir='JPEGImages',
|
| 51 |
+
ann_dir='SegmentationClassContext',
|
| 52 |
+
split='ImageSets/SegmentationContext/val.txt',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='JPEGImages',
|
| 58 |
+
ann_dir='SegmentationClassContext',
|
| 59 |
+
split='ImageSets/SegmentationContext/val.txt',
|
| 60 |
+
pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_voc12.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'PascalVOCDataset'
|
| 3 |
+
data_root = 'data/VOCdevkit/VOC2012'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
crop_size = (512, 512)
|
| 7 |
+
train_pipeline = [
|
| 8 |
+
dict(type='LoadImageFromFile'),
|
| 9 |
+
dict(type='LoadAnnotations'),
|
| 10 |
+
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
|
| 11 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 12 |
+
dict(type='RandomFlip', prob=0.5),
|
| 13 |
+
dict(type='PhotoMetricDistortion'),
|
| 14 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 15 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 16 |
+
dict(type='DefaultFormatBundle'),
|
| 17 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
| 18 |
+
]
|
| 19 |
+
test_pipeline = [
|
| 20 |
+
dict(type='LoadImageFromFile'),
|
| 21 |
+
dict(
|
| 22 |
+
type='MultiScaleFlipAug',
|
| 23 |
+
img_scale=(2048, 512),
|
| 24 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
| 25 |
+
flip=False,
|
| 26 |
+
transforms=[
|
| 27 |
+
dict(type='Resize', keep_ratio=True),
|
| 28 |
+
dict(type='RandomFlip'),
|
| 29 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 30 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 31 |
+
dict(type='Collect', keys=['img']),
|
| 32 |
+
])
|
| 33 |
+
]
|
| 34 |
+
data = dict(
|
| 35 |
+
samples_per_gpu=4,
|
| 36 |
+
workers_per_gpu=4,
|
| 37 |
+
train=dict(
|
| 38 |
+
type=dataset_type,
|
| 39 |
+
data_root=data_root,
|
| 40 |
+
img_dir='JPEGImages',
|
| 41 |
+
ann_dir='SegmentationClass',
|
| 42 |
+
split='ImageSets/Segmentation/train.txt',
|
| 43 |
+
pipeline=train_pipeline),
|
| 44 |
+
val=dict(
|
| 45 |
+
type=dataset_type,
|
| 46 |
+
data_root=data_root,
|
| 47 |
+
img_dir='JPEGImages',
|
| 48 |
+
ann_dir='SegmentationClass',
|
| 49 |
+
split='ImageSets/Segmentation/val.txt',
|
| 50 |
+
pipeline=test_pipeline),
|
| 51 |
+
test=dict(
|
| 52 |
+
type=dataset_type,
|
| 53 |
+
data_root=data_root,
|
| 54 |
+
img_dir='JPEGImages',
|
| 55 |
+
ann_dir='SegmentationClass',
|
| 56 |
+
split='ImageSets/Segmentation/val.txt',
|
| 57 |
+
pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/pascal_voc12_aug.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_ = './pascal_voc12.py'
|
| 2 |
+
# dataset settings
|
| 3 |
+
data = dict(
|
| 4 |
+
train=dict(
|
| 5 |
+
ann_dir=['SegmentationClass', 'SegmentationClassAug'],
|
| 6 |
+
split=[
|
| 7 |
+
'ImageSets/Segmentation/train.txt',
|
| 8 |
+
'ImageSets/Segmentation/aug.txt'
|
| 9 |
+
]))
|
RAVE-main/annotator/uniformer/configs/_base_/datasets/stare.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'STAREDataset'
|
| 3 |
+
data_root = 'data/STARE'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
img_scale = (605, 700)
|
| 7 |
+
crop_size = (128, 128)
|
| 8 |
+
train_pipeline = [
|
| 9 |
+
dict(type='LoadImageFromFile'),
|
| 10 |
+
dict(type='LoadAnnotations'),
|
| 11 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 12 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 13 |
+
dict(type='RandomFlip', prob=0.5),
|
| 14 |
+
dict(type='PhotoMetricDistortion'),
|
| 15 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 16 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 17 |
+
dict(type='DefaultFormatBundle'),
|
| 18 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
| 19 |
+
]
|
| 20 |
+
test_pipeline = [
|
| 21 |
+
dict(type='LoadImageFromFile'),
|
| 22 |
+
dict(
|
| 23 |
+
type='MultiScaleFlipAug',
|
| 24 |
+
img_scale=img_scale,
|
| 25 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
| 26 |
+
flip=False,
|
| 27 |
+
transforms=[
|
| 28 |
+
dict(type='Resize', keep_ratio=True),
|
| 29 |
+
dict(type='RandomFlip'),
|
| 30 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 31 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 32 |
+
dict(type='Collect', keys=['img'])
|
| 33 |
+
])
|
| 34 |
+
]
|
| 35 |
+
|
| 36 |
+
data = dict(
|
| 37 |
+
samples_per_gpu=4,
|
| 38 |
+
workers_per_gpu=4,
|
| 39 |
+
train=dict(
|
| 40 |
+
type='RepeatDataset',
|
| 41 |
+
times=40000,
|
| 42 |
+
dataset=dict(
|
| 43 |
+
type=dataset_type,
|
| 44 |
+
data_root=data_root,
|
| 45 |
+
img_dir='images/training',
|
| 46 |
+
ann_dir='annotations/training',
|
| 47 |
+
pipeline=train_pipeline)),
|
| 48 |
+
val=dict(
|
| 49 |
+
type=dataset_type,
|
| 50 |
+
data_root=data_root,
|
| 51 |
+
img_dir='images/validation',
|
| 52 |
+
ann_dir='annotations/validation',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='images/validation',
|
| 58 |
+
ann_dir='annotations/validation',
|
| 59 |
+
pipeline=test_pipeline))
|
RAVE-main/annotator/uniformer/configs/_base_/default_runtime.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# yapf:disable
|
| 2 |
+
log_config = dict(
|
| 3 |
+
interval=50,
|
| 4 |
+
hooks=[
|
| 5 |
+
dict(type='TextLoggerHook', by_epoch=False),
|
| 6 |
+
# dict(type='TensorboardLoggerHook')
|
| 7 |
+
])
|
| 8 |
+
# yapf:enable
|
| 9 |
+
dist_params = dict(backend='nccl')
|
| 10 |
+
log_level = 'INFO'
|
| 11 |
+
load_from = None
|
| 12 |
+
resume_from = None
|
| 13 |
+
workflow = [('train', 1)]
|
| 14 |
+
cudnn_benchmark = True
|
RAVE-main/annotator/uniformer/configs/_base_/models/ann_r50-d8.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='ANNHead',
|
| 19 |
+
in_channels=[1024, 2048],
|
| 20 |
+
in_index=[2, 3],
|
| 21 |
+
channels=512,
|
| 22 |
+
project_channels=256,
|
| 23 |
+
query_scales=(1, ),
|
| 24 |
+
key_pool_scales=(1, 3, 6, 8),
|
| 25 |
+
dropout_ratio=0.1,
|
| 26 |
+
num_classes=19,
|
| 27 |
+
norm_cfg=norm_cfg,
|
| 28 |
+
align_corners=False,
|
| 29 |
+
loss_decode=dict(
|
| 30 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 31 |
+
auxiliary_head=dict(
|
| 32 |
+
type='FCNHead',
|
| 33 |
+
in_channels=1024,
|
| 34 |
+
in_index=2,
|
| 35 |
+
channels=256,
|
| 36 |
+
num_convs=1,
|
| 37 |
+
concat_input=False,
|
| 38 |
+
dropout_ratio=0.1,
|
| 39 |
+
num_classes=19,
|
| 40 |
+
norm_cfg=norm_cfg,
|
| 41 |
+
align_corners=False,
|
| 42 |
+
loss_decode=dict(
|
| 43 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 44 |
+
# model training and testing settings
|
| 45 |
+
train_cfg=dict(),
|
| 46 |
+
test_cfg=dict(mode='whole'))
|
RAVE-main/annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='APCHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
pool_scales=(1, 2, 3, 6),
|
| 23 |
+
dropout_ratio=0.1,
|
| 24 |
+
num_classes=19,
|
| 25 |
+
norm_cfg=dict(type='SyncBN', requires_grad=True),
|
| 26 |
+
align_corners=False,
|
| 27 |
+
loss_decode=dict(
|
| 28 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 29 |
+
auxiliary_head=dict(
|
| 30 |
+
type='FCNHead',
|
| 31 |
+
in_channels=1024,
|
| 32 |
+
in_index=2,
|
| 33 |
+
channels=256,
|
| 34 |
+
num_convs=1,
|
| 35 |
+
concat_input=False,
|
| 36 |
+
dropout_ratio=0.1,
|
| 37 |
+
num_classes=19,
|
| 38 |
+
norm_cfg=norm_cfg,
|
| 39 |
+
align_corners=False,
|
| 40 |
+
loss_decode=dict(
|
| 41 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 42 |
+
# model training and testing settings
|
| 43 |
+
train_cfg=dict(),
|
| 44 |
+
test_cfg=dict(mode='whole'))
|
RAVE-main/annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='CCHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
recurrence=2,
|
| 23 |
+
dropout_ratio=0.1,
|
| 24 |
+
num_classes=19,
|
| 25 |
+
norm_cfg=norm_cfg,
|
| 26 |
+
align_corners=False,
|
| 27 |
+
loss_decode=dict(
|
| 28 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 29 |
+
auxiliary_head=dict(
|
| 30 |
+
type='FCNHead',
|
| 31 |
+
in_channels=1024,
|
| 32 |
+
in_index=2,
|
| 33 |
+
channels=256,
|
| 34 |
+
num_convs=1,
|
| 35 |
+
concat_input=False,
|
| 36 |
+
dropout_ratio=0.1,
|
| 37 |
+
num_classes=19,
|
| 38 |
+
norm_cfg=norm_cfg,
|
| 39 |
+
align_corners=False,
|
| 40 |
+
loss_decode=dict(
|
| 41 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 42 |
+
# model training and testing settings
|
| 43 |
+
train_cfg=dict(),
|
| 44 |
+
test_cfg=dict(mode='whole'))
|
RAVE-main/annotator/uniformer/configs/_base_/models/cgnet.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
backbone=dict(
|
| 6 |
+
type='CGNet',
|
| 7 |
+
norm_cfg=norm_cfg,
|
| 8 |
+
in_channels=3,
|
| 9 |
+
num_channels=(32, 64, 128),
|
| 10 |
+
num_blocks=(3, 21),
|
| 11 |
+
dilations=(2, 4),
|
| 12 |
+
reductions=(8, 16)),
|
| 13 |
+
decode_head=dict(
|
| 14 |
+
type='FCNHead',
|
| 15 |
+
in_channels=256,
|
| 16 |
+
in_index=2,
|
| 17 |
+
channels=256,
|
| 18 |
+
num_convs=0,
|
| 19 |
+
concat_input=False,
|
| 20 |
+
dropout_ratio=0,
|
| 21 |
+
num_classes=19,
|
| 22 |
+
norm_cfg=norm_cfg,
|
| 23 |
+
loss_decode=dict(
|
| 24 |
+
type='CrossEntropyLoss',
|
| 25 |
+
use_sigmoid=False,
|
| 26 |
+
loss_weight=1.0,
|
| 27 |
+
class_weight=[
|
| 28 |
+
2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352,
|
| 29 |
+
10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905,
|
| 30 |
+
10.347791, 6.3927646, 10.226669, 10.241062, 10.280587,
|
| 31 |
+
10.396974, 10.055647
|
| 32 |
+
])),
|
| 33 |
+
# model training and testing settings
|
| 34 |
+
train_cfg=dict(sampler=None),
|
| 35 |
+
test_cfg=dict(mode='whole'))
|
RAVE-main/annotator/uniformer/configs/_base_/models/danet_r50-d8.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='DAHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
pam_channels=64,
|
| 23 |
+
dropout_ratio=0.1,
|
| 24 |
+
num_classes=19,
|
| 25 |
+
norm_cfg=norm_cfg,
|
| 26 |
+
align_corners=False,
|
| 27 |
+
loss_decode=dict(
|
| 28 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 29 |
+
auxiliary_head=dict(
|
| 30 |
+
type='FCNHead',
|
| 31 |
+
in_channels=1024,
|
| 32 |
+
in_index=2,
|
| 33 |
+
channels=256,
|
| 34 |
+
num_convs=1,
|
| 35 |
+
concat_input=False,
|
| 36 |
+
dropout_ratio=0.1,
|
| 37 |
+
num_classes=19,
|
| 38 |
+
norm_cfg=norm_cfg,
|
| 39 |
+
align_corners=False,
|
| 40 |
+
loss_decode=dict(
|
| 41 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 42 |
+
# model training and testing settings
|
| 43 |
+
train_cfg=dict(),
|
| 44 |
+
test_cfg=dict(mode='whole'))
|
RAVE-main/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='ASPPHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
dilations=(1, 12, 24, 36),
|
| 23 |
+
dropout_ratio=0.1,
|
| 24 |
+
num_classes=19,
|
| 25 |
+
norm_cfg=norm_cfg,
|
| 26 |
+
align_corners=False,
|
| 27 |
+
loss_decode=dict(
|
| 28 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 29 |
+
auxiliary_head=dict(
|
| 30 |
+
type='FCNHead',
|
| 31 |
+
in_channels=1024,
|
| 32 |
+
in_index=2,
|
| 33 |
+
channels=256,
|
| 34 |
+
num_convs=1,
|
| 35 |
+
concat_input=False,
|
| 36 |
+
dropout_ratio=0.1,
|
| 37 |
+
num_classes=19,
|
| 38 |
+
norm_cfg=norm_cfg,
|
| 39 |
+
align_corners=False,
|
| 40 |
+
loss_decode=dict(
|
| 41 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 42 |
+
# model training and testing settings
|
| 43 |
+
train_cfg=dict(),
|
| 44 |
+
test_cfg=dict(mode='whole'))
|
RAVE-main/annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained=None,
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='UNet',
|
| 8 |
+
in_channels=3,
|
| 9 |
+
base_channels=64,
|
| 10 |
+
num_stages=5,
|
| 11 |
+
strides=(1, 1, 1, 1, 1),
|
| 12 |
+
enc_num_convs=(2, 2, 2, 2, 2),
|
| 13 |
+
dec_num_convs=(2, 2, 2, 2),
|
| 14 |
+
downsamples=(True, True, True, True),
|
| 15 |
+
enc_dilations=(1, 1, 1, 1, 1),
|
| 16 |
+
dec_dilations=(1, 1, 1, 1),
|
| 17 |
+
with_cp=False,
|
| 18 |
+
conv_cfg=None,
|
| 19 |
+
norm_cfg=norm_cfg,
|
| 20 |
+
act_cfg=dict(type='ReLU'),
|
| 21 |
+
upsample_cfg=dict(type='InterpConv'),
|
| 22 |
+
norm_eval=False),
|
| 23 |
+
decode_head=dict(
|
| 24 |
+
type='ASPPHead',
|
| 25 |
+
in_channels=64,
|
| 26 |
+
in_index=4,
|
| 27 |
+
channels=16,
|
| 28 |
+
dilations=(1, 12, 24, 36),
|
| 29 |
+
dropout_ratio=0.1,
|
| 30 |
+
num_classes=2,
|
| 31 |
+
norm_cfg=norm_cfg,
|
| 32 |
+
align_corners=False,
|
| 33 |
+
loss_decode=dict(
|
| 34 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 35 |
+
auxiliary_head=dict(
|
| 36 |
+
type='FCNHead',
|
| 37 |
+
in_channels=128,
|
| 38 |
+
in_index=3,
|
| 39 |
+
channels=64,
|
| 40 |
+
num_convs=1,
|
| 41 |
+
concat_input=False,
|
| 42 |
+
dropout_ratio=0.1,
|
| 43 |
+
num_classes=2,
|
| 44 |
+
norm_cfg=norm_cfg,
|
| 45 |
+
align_corners=False,
|
| 46 |
+
loss_decode=dict(
|
| 47 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 48 |
+
# model training and testing settings
|
| 49 |
+
train_cfg=dict(),
|
| 50 |
+
test_cfg=dict(mode='slide', crop_size=256, stride=170))
|
RAVE-main/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='DepthwiseSeparableASPPHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
dilations=(1, 12, 24, 36),
|
| 23 |
+
c1_in_channels=256,
|
| 24 |
+
c1_channels=48,
|
| 25 |
+
dropout_ratio=0.1,
|
| 26 |
+
num_classes=19,
|
| 27 |
+
norm_cfg=norm_cfg,
|
| 28 |
+
align_corners=False,
|
| 29 |
+
loss_decode=dict(
|
| 30 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 31 |
+
auxiliary_head=dict(
|
| 32 |
+
type='FCNHead',
|
| 33 |
+
in_channels=1024,
|
| 34 |
+
in_index=2,
|
| 35 |
+
channels=256,
|
| 36 |
+
num_convs=1,
|
| 37 |
+
concat_input=False,
|
| 38 |
+
dropout_ratio=0.1,
|
| 39 |
+
num_classes=19,
|
| 40 |
+
norm_cfg=norm_cfg,
|
| 41 |
+
align_corners=False,
|
| 42 |
+
loss_decode=dict(
|
| 43 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 44 |
+
# model training and testing settings
|
| 45 |
+
train_cfg=dict(),
|
| 46 |
+
test_cfg=dict(mode='whole'))
|