code stringlengths 17 6.64M |
|---|
class RandomCrop_new(object):
    """Randomly crop an image/label pair to a fixed size, padding with
    black (image) / white (label) wherever the source is smaller than
    the crop window."""

    def __init__(self, size, padding=0):
        # A single number means a square crop.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return {'image': img, 'label': mask}
        # Fresh canvases: black image, white label (255 used as fill).
        canvas_img = Image.new('RGB', (tw, th), 'black')
        canvas_mask = Image.new('L', (tw, th), 'white')
        x1 = random.randint(0, w - tw) if w > tw else 0
        y1 = random.randint(0, h - th) if h > th else 0
        box = (x1, y1, x1 + tw, y1 + th)
        canvas_img.paste(img.crop(box), (0, 0))
        canvas_mask.paste(mask.crop(box), (0, 0))
        return {'image': canvas_img, 'label': canvas_mask}
|
class Paste(object):
    """Paste an image/label pair onto a fixed-size canvas (top-left
    corner); the pair must not be larger than the target size."""

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        assert w <= tw and h <= th
        if (w, h) == (tw, th):
            return {'image': img, 'label': mask}
        canvas_img = Image.new('RGB', (tw, th), 'black')
        canvas_mask = Image.new('L', (tw, th), 'white')
        canvas_img.paste(img, (0, 0))
        canvas_mask.paste(mask, (0, 0))
        return {'image': canvas_img, 'label': canvas_mask}
|
class CenterCrop(object):
    """Crop the centered (th, tw) region out of an image/label pair."""

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        left = int(round((w - tw) / 2.0))
        top = int(round((h - th) / 2.0))
        box = (left, top, left + tw, top + th)
        return {'image': img.crop(box), 'label': mask.crop(box)}
|
class RandomHorizontalFlip(object):
    """Mirror both image and label left-right with probability 0.5."""

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        return {'image': img, 'label': mask}
|
class HorizontalFlip(object):
    """Always mirror both image and label left-right (deterministic)."""

    def __call__(self, sample):
        flipped_img = sample['image'].transpose(Image.FLIP_LEFT_RIGHT)
        flipped_mask = sample['label'].transpose(Image.FLIP_LEFT_RIGHT)
        return {'image': flipped_img, 'label': flipped_mask}
|
class HorizontalFlip_only_img(object):
    """Always mirror the image left-right; the label passes through
    untouched."""

    def __call__(self, sample):
        return {'image': sample['image'].transpose(Image.FLIP_LEFT_RIGHT),
                'label': sample['label']}
|
class RandomHorizontalFlip_cihp(object):
    # With probability 0.5, mirror the image left-right.
    # NOTE(review): only the image is flipped here, not the label. For CIHP a
    # flipped label would also need its left/right class ids swapped (compare
    # flip_cihp later in this file) -- confirm the label is handled by the
    # caller, otherwise image and label silently go out of sync.
    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        if (random.random() < 0.5):
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        return {'image': img, 'label': mask}
|
class Normalize(object):
    """Normalize an image with mean and standard deviation.

    The image is scaled from [0, 255] to [0, 1] first; the label is only
    cast to float32, never normalized.

    Args:
        mean (tuple): per-channel means, subtracted after the /255 scaling.
        std (tuple): per-channel standard deviations, divided out last.
    """

    def __init__(self, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        image = np.array(sample['image']).astype(np.float32)
        label = np.array(sample['label']).astype(np.float32)
        # In-place ops keep the arrays float32 (a fresh expression mixing in
        # the python-float mean/std would upcast to float64).
        image /= 255.0
        image -= self.mean
        image /= self.std
        return {'image': image, 'label': label}
|
class Normalize_255(object):
    """Subtract per-channel means given in 0-255 range (TF-style), then
    divide by std; the label is only cast to float32.

    Args:
        mean (tuple): per-channel means in [0, 255].
        std (tuple): per-channel standard deviations.
    """
    def __init__(self, mean=(123.15, 115.9, 103.06), std=(1.0, 1.0, 1.0)):
        self.mean = mean
        self.std = std
    def __call__(self, sample):
        img = np.array(sample['image']).astype(np.float32)
        mask = np.array(sample['label']).astype(np.float32)
        img -= self.mean
        img /= self.std
        img = img  # no-op, kept as-is
        # NOTE(review): this indexes positions 0,3,2,1 along the FIRST axis of
        # what is an H x W x C array at this point -- it reads like a channel
        # reorder written for a C x H x W layout (and with 4 indices at that).
        # As written it selects image rows, not channels; verify the intended
        # axis before relying on this transform.
        img = img[([0, 3, 2, 1], ...)]
        return {'image': img, 'label': mask}
|
class Normalize_xception_tf(object):
    """Scale the image from [0, 255] to [-1, 1] (Xception/TF convention);
    the label is cast to float32 unchanged."""

    def __call__(self, sample):
        image = np.array(sample['image']).astype(np.float32)
        label = np.array(sample['label']).astype(np.float32)
        image = image * 2.0 / 255.0 - 1
        return {'image': image, 'label': label}
|
class Normalize_xception_tf_only_img(object):
    """Scale only the image from [0, 255] to [-1, 1]; the label object is
    returned completely untouched (no float cast)."""

    def __call__(self, sample):
        image = np.array(sample['image']).astype(np.float32)
        image = image * 2.0 / 255.0 - 1
        return {'image': image, 'label': sample['label']}
|
class Normalize_cityscapes(object):
    """Subtract per-channel means (0-255 range) and divide the image by 255;
    the label is cast to float32 unchanged.

    Args:
        mean (tuple): per-channel means, subtracted before the /255 scaling.
    """

    def __init__(self, mean=(0.0, 0.0, 0.0)):
        self.mean = mean

    def __call__(self, sample):
        image = np.array(sample['image']).astype(np.float32)
        label = np.array(sample['label']).astype(np.float32)
        image -= self.mean
        image /= 255.0
        return {'image': image, 'label': label}
|
class ToTensor_(object):
    """Convert ndarrays in sample to CHW float tensors, reordering the
    image channels RGB -> BGR."""

    def __init__(self):
        # Reorders the channel (first) axis of a CHW tensor.
        self.rgb2bgr = lambda x: x[([2, 1, 0], ...)]

    def __call__(self, sample):
        image = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))
        label = np.expand_dims(np.array(sample['label']).astype(np.float32), -1)
        label = label.transpose((2, 0, 1))
        image = self.rgb2bgr(torch.from_numpy(image).float())
        return {'image': image, 'label': torch.from_numpy(label).float()}
|
class ToTensor_only_img(object):
    """Convert the image ndarray to a CHW float tensor (RGB -> BGR); the
    label is passed through untouched."""

    def __init__(self):
        # Reorders the channel (first) axis of a CHW tensor.
        self.rgb2bgr = lambda x: x[([2, 1, 0], ...)]

    def __call__(self, sample):
        image = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))
        image = self.rgb2bgr(torch.from_numpy(image).float())
        return {'image': image, 'label': sample['label']}
|
class FixedResize(object):
    """Resize image (bilinear) and label (nearest) to a fixed size given
    as (height, width)."""

    def __init__(self, size):
        # PIL wants (width, height); the argument comes in as (height, width).
        self.size = tuple(reversed(size))

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        return {'image': img.resize(self.size, Image.BILINEAR),
                'label': mask.resize(self.size, Image.NEAREST)}
|
class Keep_origin_size_Resize(object):
    """Resize the pair by `scale` relative to `max_size`, then paste the
    result onto a fixed max_size[0]*scale canvas so the output size is
    constant."""

    def __init__(self, max_size, scale=1.0):
        self.size = tuple(reversed(max_size))
        self.scale = scale
        self.paste = Paste(int(max_size[0] * scale))

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        h, w = self.size
        h, w = int(h * self.scale), int(w * self.scale)
        resized = {'image': img.resize((h, w), Image.BILINEAR),
                   'label': mask.resize((h, w), Image.NEAREST)}
        return self.paste(resized)
|
class Scale(object):
    """Resize the pair to a fixed (height, width), unless the longer side
    already matches the corresponding target dimension."""

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        w, h = img.size
        already_sized = ((w >= h and w == self.size[1]) or
                         (h >= w and h == self.size[0]))
        if already_sized:
            return {'image': img, 'label': mask}
        oh, ow = self.size
        return {'image': img.resize((ow, oh), Image.BILINEAR),
                'label': mask.resize((ow, oh), Image.NEAREST)}
|
class Scale_(object):
    """Resize both image and label by a fixed scale factor."""

    def __init__(self, scale):
        self.scale = scale

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        target = (int(img.size[0] * self.scale), int(img.size[1] * self.scale))
        return {'image': img.resize(target, Image.BILINEAR),
                'label': mask.resize(target, Image.NEAREST)}
|
class Scale_only_img(object):
    """Resize only the image by a fixed scale factor; the label keeps its
    original size (note: no size assertion here)."""

    def __init__(self, scale):
        self.scale = scale

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        target = (int(img.size[0] * self.scale), int(img.size[1] * self.scale))
        return {'image': img.resize(target, Image.BILINEAR), 'label': mask}
|
class RandomSizedCrop(object):
    # Inception-style random-area crop: sample a crop covering 45-100% of the
    # image area with aspect ratio in [0.5, 2], then resize it to size x size.
    # Falls back to Scale + CenterCrop when 10 attempts fail to fit.
    def __init__(self, size):
        self.size = size
    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert (img.size == mask.size)
        for attempt in range(10):
            area = (img.size[0] * img.size[1])
            target_area = (random.uniform(0.45, 1.0) * area)
            aspect_ratio = random.uniform(0.5, 2)
            w = int(round(math.sqrt((target_area * aspect_ratio))))
            h = int(round(math.sqrt((target_area / aspect_ratio))))
            # Swap w/h half the time so both orientations are sampled.
            if (random.random() < 0.5):
                (w, h) = (h, w)
            # Only accept the attempt if the crop fits inside the image.
            if ((w <= img.size[0]) and (h <= img.size[1])):
                x1 = random.randint(0, (img.size[0] - w))
                y1 = random.randint(0, (img.size[1] - h))
                img = img.crop((x1, y1, (x1 + w), (y1 + h)))
                mask = mask.crop((x1, y1, (x1 + w), (y1 + h)))
                assert (img.size == (w, h))
                img = img.resize((self.size, self.size), Image.BILINEAR)
                mask = mask.resize((self.size, self.size), Image.NEAREST)
                return {'image': img, 'label': mask}
        # Fallback: deterministic scale followed by a center crop.
        scale = Scale(self.size)
        crop = CenterCrop(self.size)
        sample = crop(scale(sample))
        return sample
|
class RandomRotate(object):
    """Rotate image (bilinear) and label (nearest) by a uniform random
    angle in [-degree, +degree]."""

    def __init__(self, degree):
        self.degree = degree

    def __call__(self, sample):
        angle = random.random() * 2 * self.degree - self.degree
        return {'image': sample['image'].rotate(angle, Image.BILINEAR),
                'label': sample['label'].rotate(angle, Image.NEAREST)}
|
class RandomSized_new(object):
    """Random rescale (an independent factor per axis) followed by
    RandomCrop_new — the main training-time augmentation."""

    def __init__(self, size, scale1=0.5, scale2=2):
        self.size = size
        self.crop = RandomCrop_new(self.size)
        self.small_scale = scale1
        self.big_scale = scale2

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        new_w = int(random.uniform(self.small_scale, self.big_scale) * img.size[0])
        new_h = int(random.uniform(self.small_scale, self.big_scale) * img.size[1])
        resized = {'image': img.resize((new_w, new_h), Image.BILINEAR),
                   'label': mask.resize((new_w, new_h), Image.NEAREST)}
        return self.crop(resized)
|
class RandomScale(object):
    """Rescale both image and label by one random factor drawn from
    [limit[0], limit[1]] (aspect ratio preserved)."""

    def __init__(self, limit):
        self.limit = limit

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        factor = random.uniform(self.limit[0], self.limit[1])
        target = (int(factor * img.size[0]), int(factor * img.size[1]))
        return {'image': img.resize(target, Image.BILINEAR),
                'label': mask.resize(target, Image.NEAREST)}
|
class Path(object):
    """Maps a dataset name to its root directory on disk."""

    @staticmethod
    def db_root_dir(database):
        if database == 'atr':
            return './data/datasets/ATR/'
        print('Database {} not available.'.format(database))
        raise NotImplementedError
|
class Path(object):
    """Maps a dataset name to its root directory on disk."""

    @staticmethod
    def db_root_dir(database):
        if database == 'cihp':
            return './data/datasets/CIHP_4w/'
        print('Database {} not available.'.format(database))
        raise NotImplementedError
|
class Path(object):
    """Maps a dataset name to its root directory on disk."""

    @staticmethod
    def db_root_dir(database):
        if database == 'pascal':
            return './data/datasets/pascal/'
        print('Database {} not available.'.format(database))
        raise NotImplementedError
|
class VOCSegmentation(Dataset):
    """Pascal person-part segmentation dataset.

    Reads image ids from <base_dir>/list/<split>_id.txt and serves
    {'image': PIL RGB image, 'label': PIL label image} samples.
    """

    def __init__(self, base_dir=Path.db_root_dir('pascal'), split='train', transform=None):
        """
        :param base_dir: path to PASCAL dataset directory
        :param split: train/val (a string or a list of strings)
        :param transform: transform applied to each sample dict
        """
        # BUG FIX: the original called super(VOCSegmentation).__init__(),
        # which builds an *unbound* super object and never initializes the
        # base class. Use the two-argument form.
        super(VOCSegmentation, self).__init__()
        self._base_dir = base_dir
        self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
        self._cat_dir = os.path.join(self._base_dir, 'SegmentationPart')
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.transform = transform
        _splits_dir = os.path.join(self._base_dir, 'list')
        self.im_ids = []
        self.images = []
        self.categories = []
        for splt in self.split:
            # (removed a redundant nested os.path.join around the same path)
            with open(os.path.join(_splits_dir, splt + '_id.txt'), 'r') as f:
                lines = f.read().splitlines()
            for ii, line in enumerate(lines):
                _image = os.path.join(self._image_dir, line + '.jpg')
                _cat = os.path.join(self._cat_dir, line + '.png')
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
        assert len(self.images) == len(self.categories)
        print('Number of images in {}: {:d}'.format(split, len(self.images)))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        _img, _target = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def _make_img_gt_point_pair(self, index):
        # Image is forced to RGB; the label keeps its native (palette) mode.
        _img = Image.open(self.images[index]).convert('RGB')
        _target = Image.open(self.categories[index])
        return _img, _target

    def __str__(self):
        return 'PASCAL(split=' + str(self.split) + ')'
|
class test_segmentation(VOCSegmentation):
    """VOCSegmentation variant used at test time; stores whether flipped
    evaluation is wanted (the flag is only recorded here, not applied)."""

    def __init__(self, base_dir=Path.db_root_dir('pascal'), split='train', transform=None, flip=True):
        super(test_segmentation, self).__init__(base_dir=base_dir, split=split, transform=transform)
        self._flip_flag = flip

    def __getitem__(self, index):
        img, target = self._make_img_gt_point_pair(index)
        sample = {'image': img, 'label': target}
        if self.transform is not None:
            return self.transform(sample)
        return sample
|
def get_parser():
    """Build and parse command-line options for the evaluation script."""

    class LookupChoices(argparse.Action):
        # Stores the dict *value* that the supplied choice key maps to.
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, self.choices[values])

    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--batch', default=16, type=int)
    parser.add_argument('--lr', default=1e-07, type=float)
    parser.add_argument('--numworker', default=12, type=int)
    parser.add_argument('--freezeBN', choices=dict(true=True, false=False),
                        default=True, action=LookupChoices)
    parser.add_argument('--step', default=30, type=int)
    parser.add_argument('--txt_file', default=None, type=str)
    parser.add_argument('--pred_path', default=None, type=str)
    parser.add_argument('--gt_path', default=None, type=str)
    parser.add_argument('--classes', default=7, type=int)
    parser.add_argument('--testepoch', default=10, type=int)
    return parser.parse_args()
|
def eval_(pred_path, gt_path, classes, txt_file):
    """Compute and print mean IoU between predicted and ground-truth PNGs.

    :param pred_path: directory prefix (with trailing separator) of predictions
    :param gt_path: directory prefix (with trailing separator) of ground truth
    :param classes: number of classes for the IoU computation
    :param txt_file: list file with one image id per line
    :raises RuntimeError: when a prediction/ground-truth image cannot be read
    """
    # (removed the original's no-op self-assignments of pred_path/gt_path)
    with open(txt_file) as f:
        lines = [x.strip() for x in f.readlines()]
    output_list = []
    label_list = []
    for i, file in enumerate(lines):
        print(i)
        file_name = file + '.png'
        try:
            predict_pic = np.array(Image.open(pred_path + file_name))
            gt_pic = np.array(Image.open(gt_path + file_name))
            output_list.append(torch.from_numpy(predict_pic))
            label_list.append(torch.from_numpy(gt_pic))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; failures surface as RuntimeError as before.
            print(file_name, flush=True)
            raise RuntimeError('no predict/gt image.')
    miou = test_human.get_iou_from_list(output_list, label_list, n_cls=classes)
    print('Validation:')
    print('MIoU: %f\n' % miou)
|
def flip(x, dim):
    """Return a copy of tensor `x` reversed along dimension `dim`.

    Replaces the hand-rolled reverse-index construction with the builtin
    torch.flip, which accepts negative dims just like the original indexing.

    :param x: any torch tensor
    :param dim: dimension to reverse (may be negative)
    """
    return torch.flip(x, [dim])
|
def flip_cihp(tail_list):
    """Swap the paired CIHP channels of a (horizontally flipped) score map.

    :param tail_list: tensor indexed per channel; effectively
        n_class(=20) x h x w given the per-channel indexing below.
    :return: tensor with channel pairs (14,15), (16,17), (18,19) swapped
        and channels 0-13 passed through unchanged.
    """
    order = list(range(14)) + [15, 14, 17, 16, 19, 18]
    return torch.cat([tail_list[c].unsqueeze(0) for c in order], dim=0)
|
def get_parser():
    """Build and parse command-line options for the CIHP training script."""

    class LookupChoices(argparse.Action):
        # Stores the dict *value* that the supplied choice key maps to.
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, self.choices[values])

    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--batch', default=16, type=int)
    parser.add_argument('--lr', default=1e-07, type=float)
    parser.add_argument('--numworker', default=12, type=int)
    parser.add_argument('--freezeBN', choices=dict(true=True, false=False),
                        default=True, action=LookupChoices)
    parser.add_argument('--step', default=10, type=int)
    parser.add_argument('--classes', default=20, type=int)
    parser.add_argument('--testInterval', default=10, type=int)
    parser.add_argument('--loadmodel', default='', type=str)
    parser.add_argument('--pretrainedModel', default='', type=str)
    parser.add_argument('--hidden_layers', default=128, type=int)
    parser.add_argument('--gpus', default=4, type=int)
    return parser.parse_args()
|
def get_graphs(opts):
    """Build (train, test) graph-adjacency tensors for cihp<->pascal
    transfer. Train tensors are expanded to opts.gpus and moved to GPU;
    test tensors stay on CPU with batch size 1."""
    # cihp->pascal mapping adjacency, transposed after expansion.
    transfer_adj = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
    adj2 = transfer_adj.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 20).transpose(2, 3).cuda()
    adj2_test = transfer_adj.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).transpose(2, 3)
    # pascal 7-node intra-graph.
    pascal_adj = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
    adj3 = pascal_adj.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 7).cuda()
    adj3_test = pascal_adj.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7)
    # cihp 20-node intra-graph.
    cihp_adj_mat = Variable(torch.from_numpy(graph.preprocess_adj(graph.cihp_graph)).float())
    adj1 = cihp_adj_mat.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 20, 20).cuda()
    adj1_test = cihp_adj_mat.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20)
    return [adj1, adj2, adj3], [adj1_test, adj2_test, adj3_test]
|
def val_cihp(net_, testloader, testloader_flip, test_graph, epoch, writer, criterion, classes=20):
    """Validate on CIHP by averaging each image's prediction with the
    prediction of its horizontal flip (left/right channels re-swapped via
    flip_cihp), then log loss and mean IoU for the epoch.

    NOTE(review): reads module-level `gpu_id` (not defined in this function)
    and indexes outputs[0]/outputs[1], which assumes the test loaders use
    batch size 1 -- confirm at the call site.
    """
    (adj1_test, adj2_test, adj3_test) = test_graph
    num_img_ts = len(testloader)
    net_.eval()
    pred_list = []
    label_list = []
    running_loss_ts = 0.0
    miou = 0
    for (ii, sample_batched) in enumerate(zip(testloader, testloader_flip)):
        (inputs, labels) = (sample_batched[0]['image'], sample_batched[0]['label'])
        (inputs_f, _) = (sample_batched[1]['image'], sample_batched[1]['label'])
        # Stack the plain and flipped image for a single forward pass.
        inputs = torch.cat((inputs, inputs_f), dim=0)
        (inputs, labels) = (Variable(inputs, requires_grad=False), Variable(labels))
        if (gpu_id >= 0):
            (inputs, labels) = (inputs.cuda(), labels.cuda())
        with torch.no_grad():
            outputs = net_.forward(inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda())
        # Average the plain prediction with the un-flipped flipped prediction.
        outputs = ((outputs[0] + flip(flip_cihp(outputs[1]), dim=(- 1))) / 2)
        outputs = outputs.unsqueeze(0)
        predictions = torch.max(outputs, 1)[1]
        pred_list.append(predictions.cpu())
        label_list.append(labels.squeeze(1).cpu())
        loss = criterion(outputs, labels, batch_average=True)
        running_loss_ts += loss.item()
        # On the last batch, report epoch-level metrics.
        if ((ii % num_img_ts) == (num_img_ts - 1)):
            miou = get_iou_from_list(pred_list, label_list, n_cls=classes)
            running_loss_ts = (running_loss_ts / num_img_ts)
            print('Validation:')
            print(('[Epoch: %d, numImages: %5d]' % (epoch, ((ii * 1) + inputs.data.shape[0]))))
            writer.add_scalar('data/test_loss_epoch', running_loss_ts, epoch)
            writer.add_scalar('data/test_miour', miou, epoch)
            print(('Loss: %f' % running_loss_ts))
            print(('MIoU: %f\n' % miou))
|
def main(opts):
    """Train the CIHP graph-transfer model (deeplab-xception backbone).

    NOTE(review): reads module-level `gpu_id` and `resume_epoch` globals
    (neither is defined in this function).
    """
    # ---- hyper-parameters --------------------------------------------------
    p = OrderedDict()
    p['trainBatch'] = opts.batch
    testBatch = 1
    useTest = True
    nTestInterval = opts.testInterval
    snapshot = 1  # checkpoint every `snapshot` epochs
    p['nAveGrad'] = 1  # gradient-accumulation steps
    p['lr'] = opts.lr
    p['lrFtr'] = 1e-05
    p['lraspp'] = 1e-05
    p['lrpro'] = 1e-05
    p['lrdecoder'] = 1e-05
    p['lrother'] = 1e-05
    p['wd'] = 0.0005
    p['momentum'] = 0.9
    p['epoch_size'] = opts.step  # epochs between poly-LR updates
    p['num_workers'] = opts.numworker
    model_path = opts.pretrainedModel
    backbone = 'xception'
    nEpochs = opts.epochs
    # ---- pick a fresh run directory run_cihp/run_<max_id> ------------------
    max_id = 0
    save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
    exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[(- 1)]
    runs = glob.glob(os.path.join(save_dir_root, 'run_cihp', 'run_*'))
    for r in runs:
        run_id = int(r.split('_')[(- 1)])
        if (run_id >= max_id):
            max_id = (run_id + 1)
    save_dir = os.path.join(save_dir_root, 'run_cihp', ('run_' + str(max_id)))
    # ---- network -----------------------------------------------------------
    if (backbone == 'xception'):
        net_ = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=opts.classes, os=16, hidden_layers=opts.hidden_layers, source_classes=7)
    elif (backbone == 'resnet'):
        raise NotImplementedError
    else:
        raise NotImplementedError
    modelName = ((('deeplabv3plus-' + backbone) + '-voc') + datetime.now().strftime('%b%d_%H-%M-%S'))
    criterion = util.cross_entropy2d
    if (gpu_id >= 0):
        net_.cuda()
    # Optionally warm-start from a pretrained model and/or a source model.
    if (not (model_path == '')):
        x = torch.load(model_path)
        net_.load_state_dict_new(x)
        print('load pretrainedModel:', model_path)
    else:
        print('no pretrainedModel.')
    if (not (opts.loadmodel == '')):
        x = torch.load(opts.loadmodel)
        net_.load_source_model(x)
        print('load model:', opts.loadmodel)
    else:
        print('no model load !!!!!!!!')
    # ---- logging -----------------------------------------------------------
    log_dir = os.path.join(save_dir, 'models', ((datetime.now().strftime('%b%d_%H-%M-%S') + '_') + socket.gethostname()))
    writer = SummaryWriter(log_dir=log_dir)
    writer.add_text('load model', opts.loadmodel, 1)
    writer.add_text('setting', sys.argv[0], 1)
    if opts.freezeBN:
        net_.freeze_bn()
    # ---- optimizer, transforms, data loaders --------------------------------
    optimizer = optim.SGD(net_.parameters(), lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
    composed_transforms_tr = transforms.Compose([tr.RandomSized_new(512), tr.Normalize_xception_tf(), tr.ToTensor_()])
    composed_transforms_ts = transforms.Compose([tr.Normalize_xception_tf(), tr.ToTensor_()])
    composed_transforms_ts_flip = transforms.Compose([tr.HorizontalFlip(), tr.Normalize_xception_tf(), tr.ToTensor_()])
    voc_train = cihp.VOCSegmentation(split='train', transform=composed_transforms_tr, flip=True)
    voc_val = cihp.VOCSegmentation(split='val', transform=composed_transforms_ts)
    voc_val_flip = cihp.VOCSegmentation(split='val', transform=composed_transforms_ts_flip)
    trainloader = DataLoader(voc_train, batch_size=p['trainBatch'], shuffle=True, num_workers=p['num_workers'], drop_last=True)
    testloader = DataLoader(voc_val, batch_size=testBatch, shuffle=False, num_workers=p['num_workers'])
    testloader_flip = DataLoader(voc_val_flip, batch_size=testBatch, shuffle=False, num_workers=p['num_workers'])
    num_img_tr = len(trainloader)
    num_img_ts = len(testloader)
    running_loss_tr = 0.0
    running_loss_ts = 0.0
    aveGrad = 0
    global_step = 0
    print('Training Network')
    net = torch.nn.DataParallel(net_)
    (train_graph, test_graph) = get_graphs(opts)
    (adj1, adj2, adj3) = train_graph
    # ---- training loop -------------------------------------------------------
    for epoch in range(resume_epoch, nEpochs):
        start_time = timeit.default_timer()
        # Poly LR schedule: rebuild the optimizer every epoch_size epochs.
        if ((epoch % p['epoch_size']) == (p['epoch_size'] - 1)):
            lr_ = util.lr_poly(p['lr'], epoch, nEpochs, 0.9)
            optimizer = optim.SGD(net_.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd'])
            writer.add_scalar('data/lr_', lr_, epoch)
            print('(poly lr policy) learning rate: ', lr_)
        net.train()
        for (ii, sample_batched) in enumerate(trainloader):
            (inputs, labels) = (sample_batched['image'], sample_batched['label'])
            (inputs, labels) = (Variable(inputs, requires_grad=True), Variable(labels))
            global_step += inputs.data.shape[0]
            if (gpu_id >= 0):
                (inputs, labels) = (inputs.cuda(), labels.cuda())
            outputs = net.forward(inputs, adj1, adj3, adj2)
            loss = criterion(outputs, labels, batch_average=True)
            running_loss_tr += loss.item()
            # End-of-epoch bookkeeping on the last batch.
            if ((ii % num_img_tr) == (num_img_tr - 1)):
                running_loss_tr = (running_loss_tr / num_img_tr)
                writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch)
                print(('[Epoch: %d, numImages: %5d]' % (epoch, ((ii * p['trainBatch']) + inputs.data.shape[0]))))
                print(('Loss: %f' % running_loss_tr))
                running_loss_tr = 0
                stop_time = timeit.default_timer()
                print((('Execution time: ' + str((stop_time - start_time))) + '\n'))
            # Backward pass with optional gradient accumulation (nAveGrad).
            loss /= p['nAveGrad']
            loss.backward()
            aveGrad += 1
            if ((aveGrad % p['nAveGrad']) == 0):
                writer.add_scalar('data/total_loss_iter', loss.item(), (ii + (num_img_tr * epoch)))
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0
            # Log example images/predictions roughly 10 times per epoch.
            if ((ii % (num_img_tr // 10)) == 0):
                grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True)
                writer.add_image('Image', grid_image, global_step)
                grid_image = make_grid(util.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                writer.add_image('Predicted label', grid_image, global_step)
                grid_image = make_grid(util.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                writer.add_image('Groundtruth label', grid_image, global_step)
            print('loss is ', loss.cpu().item(), flush=True)
        # Periodic checkpoint.
        if ((epoch % snapshot) == (snapshot - 1)):
            torch.save(net_.state_dict(), os.path.join(save_dir, 'models', (((modelName + '_epoch-') + str(epoch)) + '.pth')))
            print('Save model at {}\n'.format(os.path.join(save_dir, 'models', (((modelName + '_epoch-') + str(epoch)) + '.pth'))))
        torch.cuda.empty_cache()
        # Periodic validation.
        if (useTest and ((epoch % nTestInterval) == (nTestInterval - 1))):
            val_cihp(net_, testloader=testloader, testloader_flip=testloader_flip, test_graph=test_graph, epoch=epoch, writer=writer, criterion=criterion, classes=opts.classes)
            torch.cuda.empty_cache()
|
def flip(x, dim):
    """Return a copy of tensor `x` reversed along dimension `dim`
    (pure index-based implementation; `dim` may be negative)."""
    reversed_idx = torch.arange(x.size(dim) - 1, -1, -1,
                                dtype=torch.long, device=x.device)
    selector = [slice(None)] * x.dim()
    selector[dim] = reversed_idx
    return x[tuple(selector)]
|
def flip_cihp(tail_list):
    """Swap the paired CIHP part channels after a horizontal flip.

    :param tail_list: score map indexed per channel; effectively
        n_class(=20) x h x w given the tail_list[xx] indexing below (the
        original "1 x n_class x h x w" note does not match that indexing).
    :return: 20 x h x w tensor with channel pairs (14,15), (16,17), (18,19)
        swapped and channels 0-13 passed through unchanged.
    """
    tail_list_rev = ([None] * 20)
    # Channels 0-13 pass through unchanged.
    for xx in range(14):
        tail_list_rev[xx] = tail_list[xx].unsqueeze(0)
    # Swap each paired channel.
    tail_list_rev[14] = tail_list[15].unsqueeze(0)
    tail_list_rev[15] = tail_list[14].unsqueeze(0)
    tail_list_rev[16] = tail_list[17].unsqueeze(0)
    tail_list_rev[17] = tail_list[16].unsqueeze(0)
    tail_list_rev[18] = tail_list[19].unsqueeze(0)
    tail_list_rev[19] = tail_list[18].unsqueeze(0)
    return torch.cat(tail_list_rev, dim=0)
|
def get_parser():
    """Build and parse command-line options for the universal training
    script.

    (Removed an unused `LookupChoices` argparse.Action that no option in
    this parser referenced.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--batch', default=16, type=int)
    parser.add_argument('--lr', default=1e-07, type=float)
    parser.add_argument('--numworker', default=12, type=int)
    parser.add_argument('--step', default=10, type=int)
    parser.add_argument('--classes', default=7, type=int)
    parser.add_argument('--testepoch', default=10, type=int)
    parser.add_argument('--loadmodel', default='', type=str)
    parser.add_argument('--pretrainedModel', default='', type=str)
    parser.add_argument('--hidden_layers', default=128, type=int)
    parser.add_argument('--gpus', default=4, type=int)
    parser.add_argument('--testInterval', default=5, type=int)
    return parser.parse_args()
|
def get_graphs(opts):
    """source is pascal; target is cihp; middle is atr

    Builds adjacency tensors for the three label graphs (cihp 20 nodes,
    pascal 7, atr 18) plus the cross-domain mapping matrices between them.
    Each comes as a train version expanded to opts.gpus on GPU and a CPU
    test version with batch 1.
    """
    # cihp intra-graph (20 x 20)
    cihp_adj = graph.preprocess_adj(graph.cihp_graph)
    adj1_ = Variable(torch.from_numpy(cihp_adj).float())
    adj1 = adj1_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 20, 20).cuda()
    adj1_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20)
    # pascal intra-graph (7 x 7)
    adj2_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())
    adj2 = adj2_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 7).cuda()
    adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7)
    # cihp->pascal mapping, transposed to 20 x 7 after expansion
    adj3_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()
    adj3 = adj3_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 20).transpose(2, 3).cuda()
    adj3_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).transpose(2, 3)
    # atr intra-graph (18 x 18)
    atr_adj = graph.preprocess_adj(graph.atr_graph)
    adj4_ = Variable(torch.from_numpy(atr_adj).float())
    adj4 = adj4_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 18, 18).cuda()
    adj4_test = adj4_.unsqueeze(0).unsqueeze(0).expand(1, 1, 18, 18)
    # pascal->atr mapping (7 x 18)
    adj5_ = torch.from_numpy(graph.pascal2atr_nlp_adj).float()
    adj5 = adj5_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 7, 18).cuda()
    adj5_test = adj5_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 18)
    # cihp->atr mapping (20 x 18)
    adj6_ = torch.from_numpy(graph.cihp2atr_nlp_adj).float()
    adj6 = adj6_.unsqueeze(0).unsqueeze(0).expand(opts.gpus, 1, 20, 18).cuda()
    adj6_test = adj6_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 18)
    train_graph = [adj1, adj2, adj3, adj4, adj5, adj6]
    test_graph = [adj1_test, adj2_test, adj3_test, adj4_test, adj5_test, adj6_test]
    return (train_graph, test_graph)
|
def main(opts):
    """Train the universal human-parsing network on CIHP + PASCAL + ATR.

    The first ``nEpochs`` epochs sample the merged dataset uniformly; the
    following ``0.5 * nEpochs`` epochs use a class-balanced sampler.  A model
    snapshot is saved every ``snapshot`` epochs and PASCAL validation runs
    every ``nTestInterval`` epochs.

    NOTE(review): relies on module-level ``gpu_id`` and ``resume_epoch``
    defined elsewhere in this file -- confirm they are set before calling.
    """
    # --- hyper-parameter / bookkeeping ------------------------------------
    p = OrderedDict()
    p['trainBatch'] = opts.batch
    testBatch = 1
    useTest = True
    nTestInterval = opts.testInterval
    snapshot = 1  # save a checkpoint every `snapshot` epochs
    p['nAveGrad'] = 1  # batches to accumulate before each optimizer step
    p['lr'] = opts.lr
    p['wd'] = 0.0005
    p['momentum'] = 0.9
    p['epoch_size'] = opts.step  # epochs between poly-LR updates
    p['num_workers'] = opts.numworker
    model_path = opts.pretrainedModel
    backbone = 'xception'
    nEpochs = opts.epochs
    # Pick the next unused run_<id> directory under ./run for this experiment.
    max_id = 0
    save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
    exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[(- 1)]
    runs = glob.glob(os.path.join(save_dir_root, 'run', 'run_*'))
    for r in runs:
        run_id = int(r.split('_')[(- 1)])
        if (run_id >= max_id):
            max_id = (run_id + 1)
    save_dir = os.path.join(save_dir_root, 'run', ('run_' + str(max_id)))
    # --- model: 20 CIHP classes (target), 7 PASCAL (source), 18 ATR (middle)
    if (backbone == 'xception'):
        net_ = deeplab_xception_universal.deeplab_xception_end2end_3d(n_classes=20, os=16, hidden_layers=opts.hidden_layers, source_classes=7, middle_classes=18)
    elif (backbone == 'resnet'):
        raise NotImplementedError
    else:
        raise NotImplementedError
    modelName = ((('deeplabv3plus-' + backbone) + '-voc') + datetime.now().strftime('%b%d_%H-%M-%S'))
    criterion = ut.cross_entropy2d
    if (gpu_id >= 0):
        net_.cuda()
    # Optionally restore pretrained backbone weights, then a trained model.
    if (not (model_path == '')):
        x = torch.load(model_path)
        net_.load_state_dict_new(x)
        print('load pretrainedModel.')
    else:
        print('no pretrainedModel.')
    if (not (opts.loadmodel == '')):
        x = torch.load(opts.loadmodel)
        net_.load_source_model(x)
        print('load model:', opts.loadmodel)
    else:
        print('no trained model load !!!!!!!!')
    log_dir = os.path.join(save_dir, 'models', ((datetime.now().strftime('%b%d_%H-%M-%S') + '_') + socket.gethostname()))
    writer = SummaryWriter(log_dir=log_dir)
    writer.add_text('load model', opts.loadmodel, 1)
    writer.add_text('setting', sys.argv[0], 1)
    optimizer = optim.SGD(net_.parameters(), lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
    # --- data: augmented training set; plain + flipped val for flip averaging
    composed_transforms_tr = transforms.Compose([tr.RandomSized_new(512), tr.Normalize_xception_tf(), tr.ToTensor_()])
    composed_transforms_ts = transforms.Compose([tr.Normalize_xception_tf(), tr.ToTensor_()])
    composed_transforms_ts_flip = transforms.Compose([tr.HorizontalFlip(), tr.Normalize_xception_tf(), tr.ToTensor_()])
    all_train = cihp_pascal_atr.VOCSegmentation(split='train', transform=composed_transforms_tr, flip=True)
    voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts)
    voc_val_flip = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts_flip)
    (num_cihp, num_pascal, num_atr) = all_train.get_class_num()
    # Uniform sampler for phase 1; balanced sampler (balance_id=1) for phase 2.
    ss = sam.Sampler_uni(num_cihp, num_pascal, num_atr, opts.batch)
    ss_balanced = sam.Sampler_uni(num_cihp, num_pascal, num_atr, opts.batch, balance_id=1)
    trainloader = DataLoader(all_train, batch_size=p['trainBatch'], shuffle=False, num_workers=p['num_workers'], sampler=ss, drop_last=True)
    trainloader_balanced = DataLoader(all_train, batch_size=p['trainBatch'], shuffle=False, num_workers=p['num_workers'], sampler=ss_balanced, drop_last=True)
    testloader = DataLoader(voc_val, batch_size=testBatch, shuffle=False, num_workers=p['num_workers'])
    testloader_flip = DataLoader(voc_val_flip, batch_size=testBatch, shuffle=False, num_workers=p['num_workers'])
    num_img_tr = len(trainloader)
    num_img_balanced = len(trainloader_balanced)
    num_img_ts = len(testloader)
    running_loss_tr = 0.0
    running_loss_tr_atr = 0.0
    running_loss_ts = 0.0
    aveGrad = 0
    global_step = 0
    print('Training Network')
    net = torch.nn.DataParallel(net_)
    id_list = torch.LongTensor(range(opts.batch))
    pascal_iter = int((num_img_tr // opts.batch))
    # Per-GPU replicated adjacency tensors (train) and single CPU copies (test).
    (train_graph, test_graph) = get_graphs(opts)
    (adj1, adj2, adj3, adj4, adj5, adj6) = train_graph
    (adj1_test, adj2_test, adj3_test, adj4_test, adj5_test, adj6_test) = test_graph
    # Train for 1.5x the requested epochs; the extra half is the balanced phase.
    for epoch in range(resume_epoch, int((1.5 * nEpochs))):
        start_time = timeit.default_timer()
        # Poly learning-rate decay, restarted for the balanced phase.
        if (((epoch % p['epoch_size']) == (p['epoch_size'] - 1)) and (epoch < nEpochs)):
            lr_ = ut.lr_poly(p['lr'], epoch, nEpochs, 0.9)
            optimizer = optim.SGD(net_.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd'])
            print('(poly lr policy) learning rate: ', lr_)
            writer.add_scalar('data/lr_', lr_, epoch)
        elif (((epoch % p['epoch_size']) == (p['epoch_size'] - 1)) and (epoch > nEpochs)):
            lr_ = ut.lr_poly(p['lr'], (epoch - nEpochs), int((0.5 * nEpochs)), 0.9)
            optimizer = optim.SGD(net_.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd'])
            print('(poly lr policy) learning rate: ', lr_)
            writer.add_scalar('data/lr_', lr_, epoch)
        net_.train()
        if (epoch < nEpochs):
            # Phase 1: uniformly sampled batches over the three datasets.
            for (ii, sample_batched) in enumerate(trainloader):
                (inputs, labels) = (sample_batched['image'], sample_batched['label'])
                # dataset id of the batch: 0 = CIHP, 1 = PASCAL, 2 = ATR.
                dataset_lbl = sample_batched['pascal'][0].item()
                (inputs, labels) = (Variable(inputs, requires_grad=True), Variable(labels))
                global_step += 1
                if (gpu_id >= 0):
                    (inputs, labels) = (inputs.cuda(), labels.cuda())
                # Route the batch through the branch matching its dataset.
                if (dataset_lbl == 0):
                    (_, outputs, _) = net.forward(None, input_target=inputs, input_middle=None, adj1_target=adj1, adj2_source=adj2, adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, adj6_transfer_m2t=adj6)
                elif (dataset_lbl == 1):
                    (outputs, _, _) = net.forward(inputs, input_target=None, input_middle=None, adj1_target=adj1, adj2_source=adj2, adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, adj6_transfer_m2t=adj6)
                else:
                    (_, _, outputs) = net.forward(None, input_target=None, input_middle=inputs, adj1_target=adj1, adj2_source=adj2, adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, adj6_transfer_m2t=adj6)
                loss = criterion(outputs, labels, batch_average=True)
                running_loss_tr += loss.item()
                # End-of-epoch console/TensorBoard summary.
                if ((ii % num_img_tr) == (num_img_tr - 1)):
                    running_loss_tr = (running_loss_tr / num_img_tr)
                    writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch)
                    print(('[Epoch: %d, numImages: %5d]' % (epoch, epoch)))
                    print(('Loss: %f' % running_loss_tr))
                    running_loss_tr = 0
                    stop_time = timeit.default_timer()
                    print((('Execution time: ' + str((stop_time - start_time))) + '\n'))
                # Gradient accumulation: step every nAveGrad batches.
                loss /= p['nAveGrad']
                loss.backward()
                aveGrad += 1
                if ((aveGrad % p['nAveGrad']) == 0):
                    writer.add_scalar('data/total_loss_iter', loss.item(), global_step)
                    if (dataset_lbl == 0):
                        writer.add_scalar('data/total_loss_iter_cihp', loss.item(), global_step)
                    if (dataset_lbl == 1):
                        writer.add_scalar('data/total_loss_iter_pascal', loss.item(), global_step)
                    if (dataset_lbl == 2):
                        writer.add_scalar('data/total_loss_iter_atr', loss.item(), global_step)
                    optimizer.step()
                    optimizer.zero_grad()
                    aveGrad = 0
                # Periodically log image / prediction / ground-truth grids.
                # NOTE(review): num_img_tr // 10 is 0 when the loader has
                # fewer than 10 batches, which would raise ZeroDivisionError.
                if ((ii % (num_img_tr // 10)) == 0):
                    grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True)
                    writer.add_image('Image', grid_image, global_step)
                    grid_image = make_grid(ut.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                    writer.add_image('Predicted label', grid_image, global_step)
                    grid_image = make_grid(ut.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                    writer.add_image('Groundtruth label', grid_image, global_step)
                    print('loss is ', loss.cpu().item(), flush=True)
        else:
            # Phase 2: identical loop body, but batches come from the
            # class-balanced sampler.
            for (ii, sample_batched) in enumerate(trainloader_balanced):
                (inputs, labels) = (sample_batched['image'], sample_batched['label'])
                dataset_lbl = sample_batched['pascal'][0].item()
                (inputs, labels) = (Variable(inputs, requires_grad=True), Variable(labels))
                global_step += 1
                if (gpu_id >= 0):
                    (inputs, labels) = (inputs.cuda(), labels.cuda())
                if (dataset_lbl == 0):
                    (_, outputs, _) = net.forward(None, input_target=inputs, input_middle=None, adj1_target=adj1, adj2_source=adj2, adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, adj6_transfer_m2t=adj6)
                elif (dataset_lbl == 1):
                    (outputs, _, _) = net.forward(inputs, input_target=None, input_middle=None, adj1_target=adj1, adj2_source=adj2, adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, adj6_transfer_m2t=adj6)
                else:
                    (_, _, outputs) = net.forward(None, input_target=None, input_middle=inputs, adj1_target=adj1, adj2_source=adj2, adj3_transfer_s2t=adj3, adj3_transfer_t2s=adj3.transpose(2, 3), adj4_middle=adj4, adj5_transfer_s2m=adj5.transpose(2, 3), adj6_transfer_t2m=adj6.transpose(2, 3), adj5_transfer_m2s=adj5, adj6_transfer_m2t=adj6)
                loss = criterion(outputs, labels, batch_average=True)
                running_loss_tr += loss.item()
                if ((ii % num_img_balanced) == (num_img_balanced - 1)):
                    running_loss_tr = (running_loss_tr / num_img_balanced)
                    writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch)
                    print(('[Epoch: %d, numImages: %5d]' % (epoch, epoch)))
                    print(('Loss: %f' % running_loss_tr))
                    running_loss_tr = 0
                    stop_time = timeit.default_timer()
                    print((('Execution time: ' + str((stop_time - start_time))) + '\n'))
                loss /= p['nAveGrad']
                loss.backward()
                aveGrad += 1
                if ((aveGrad % p['nAveGrad']) == 0):
                    writer.add_scalar('data/total_loss_iter', loss.item(), global_step)
                    if (dataset_lbl == 0):
                        writer.add_scalar('data/total_loss_iter_cihp', loss.item(), global_step)
                    if (dataset_lbl == 1):
                        writer.add_scalar('data/total_loss_iter_pascal', loss.item(), global_step)
                    if (dataset_lbl == 2):
                        writer.add_scalar('data/total_loss_iter_atr', loss.item(), global_step)
                    optimizer.step()
                    optimizer.zero_grad()
                    aveGrad = 0
                if ((ii % (num_img_balanced // 10)) == 0):
                    grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True)
                    writer.add_image('Image', grid_image, global_step)
                    grid_image = make_grid(ut.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                    writer.add_image('Predicted label', grid_image, global_step)
                    grid_image = make_grid(ut.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                    writer.add_image('Groundtruth label', grid_image, global_step)
                    print('loss is ', loss.cpu().item(), flush=True)
        # Snapshot and periodic PASCAL validation.
        if ((epoch % snapshot) == (snapshot - 1)):
            torch.save(net_.state_dict(), os.path.join(save_dir, 'models', (((modelName + '_epoch-') + str(epoch)) + '.pth')))
            print('Save model at {}\n'.format(os.path.join(save_dir, 'models', (((modelName + '_epoch-') + str(epoch)) + '.pth'))))
        if (useTest and ((epoch % nTestInterval) == (nTestInterval - 1))):
            val_pascal(net_=net_, testloader=testloader, testloader_flip=testloader_flip, test_graph=test_graph, criterion=criterion, epoch=epoch, writer=writer)
|
def val_pascal(net_, testloader, testloader_flip, test_graph, criterion, epoch, writer, classes=7):
    """Run flip-averaged validation on the PASCAL split and log loss/mIoU.

    Each sample is batched with its horizontally flipped copy; the two
    class-score maps are averaged (after un-flipping the second) before the
    per-pixel argmax.  Loss and mIoU are written to ``writer`` at ``epoch``.

    NOTE(review): relies on module-level ``gpu_id`` and the ``flip`` /
    ``get_iou_from_list`` helpers defined elsewhere in this file.
    """
    running_loss_ts = 0.0
    miou = 0
    (adj1_test, adj2_test, adj3_test, adj4_test, adj5_test, adj6_test) = test_graph
    num_img_ts = len(testloader)
    net_.eval()
    pred_list = []
    label_list = []
    # Improvement: the adjacency matrices are loop-invariant, so move them to
    # the GPU once here instead of re-uploading all of them on every batch
    # (the original called .cuda() on each inside the loop).
    adj1_test = adj1_test.cuda()
    adj2_test = adj2_test.cuda()
    adj3_test = adj3_test.cuda()
    adj4_test = adj4_test.cuda()
    adj5_test = adj5_test.cuda()
    adj6_test = adj6_test.cuda()
    for (ii, sample_batched) in enumerate(zip(testloader, testloader_flip)):
        (inputs, labels) = (sample_batched[0]['image'], sample_batched[0]['label'])
        (inputs_f, _) = (sample_batched[1]['image'], sample_batched[1]['label'])
        # One forward pass for the plain and flipped images together.
        inputs = torch.cat((inputs, inputs_f), dim=0)
        (inputs, labels) = (Variable(inputs, requires_grad=False), Variable(labels))
        with torch.no_grad():
            if (gpu_id >= 0):
                (inputs, labels) = (inputs.cuda(), labels.cuda())
            (outputs, _, _) = net_.forward(inputs, input_target=None, input_middle=None, adj1_target=adj1_test, adj2_source=adj2_test, adj3_transfer_s2t=adj3_test, adj3_transfer_t2s=adj3_test.transpose(2, 3), adj4_middle=adj4_test, adj5_transfer_s2m=adj5_test.transpose(2, 3), adj6_transfer_t2m=adj6_test.transpose(2, 3), adj5_transfer_m2s=adj5_test, adj6_transfer_m2t=adj6_test)
            # Average the plain prediction with the un-flipped flipped one.
            outputs = ((outputs[0] + flip(outputs[1], dim=(- 1))) / 2)
            outputs = outputs.unsqueeze(0)
            predictions = torch.max(outputs, 1)[1]
            pred_list.append(predictions.cpu())
            label_list.append(labels.squeeze(1).cpu())
            loss = criterion(outputs, labels, batch_average=True)
            running_loss_ts += loss.item()
            # After the last batch, compute and log epoch-level metrics.
            if ((ii % num_img_ts) == (num_img_ts - 1)):
                miou = get_iou_from_list(pred_list, label_list, n_cls=classes)
                running_loss_ts = (running_loss_ts / num_img_ts)
                print('Validation:')
                print(('[Epoch: %d, numImages: %5d]' % (epoch, ((ii * 1) + inputs.data.shape[0]))))
                writer.add_scalar('data/test_loss_epoch', running_loss_ts, epoch)
                writer.add_scalar('data/test_miour', miou, epoch)
                print(('Loss: %f' % running_loss_ts))
                print(('MIoU: %f\n' % miou))
|
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a grouped (per-channel) spatial
    convolution followed by a 1x1 pointwise convolution. No norm/activation."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=0, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()
        # Depthwise stage: groups == in-channels, one spatial filter per channel.
        self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding,
                               dilation, groups=inplanes, bias=bias)
        # Pointwise stage: 1x1 convolution that mixes channels.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
|
def fixed_padding(inputs, kernel_size, rate):
    """TF-style 'SAME' padding for a (possibly dilated) square kernel.

    Pads symmetrically, giving the extra pixel to the bottom/right when the
    required total is odd, so the subsequent convolution matches TensorFlow's
    'SAME' output size.
    """
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    total_pad = effective_kernel - 1
    begin = total_pad // 2
    end = total_pad - begin
    return F.pad(inputs, (begin, end, begin, end))
|
class SeparableConv2d_aspp(nn.Module):
    """Separable convolution with BatchNorm + ReLU after both the depthwise
    and the pointwise stage (variant used by the ASPP / decoder modules)."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
        super(SeparableConv2d_aspp, self).__init__()
        # Depthwise (grouped) spatial convolution + its BatchNorm.
        self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding,
                                   dilation, groups=inplanes, bias=bias)
        self.depthwise_bn = nn.BatchNorm2d(inplanes)
        # Pointwise 1x1 channel mixer + its BatchNorm.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.depthwise_bn(self.depthwise(x)))
        out = self.relu(self.pointwise_bn(self.pointwise(out)))
        return out
|
class Decoder_module(nn.Module):
    """One decoder refinement step: a single 3x3 separable conv (BN + ReLU
    inside) that keeps spatial size (padding=1)."""

    def __init__(self, inplanes, planes, rate=1):
        super(Decoder_module, self).__init__()
        self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, 3,
                                                       stride=1, dilation=rate, padding=1)

    def forward(self, x):
        return self.atrous_convolution(x)
|
class ASPP_module(nn.Module):
    """ASPP branch for dilation rates > 1: a dilated 3x3 separable conv with
    'same' padding (padding == rate).  Rate 1 is handled by
    ``ASPP_module_rate0`` instead, hence the guard below.
    """

    def __init__(self, inplanes, planes, rate):
        super(ASPP_module, self).__init__()
        if (rate == 1):
            # A rate-1 branch is a plain 1x1 conv; use ASPP_module_rate0.
            raise RuntimeError()
        # Fix: the original assigned `kernel_size = 3` but then passed the
        # literal 3, leaving the local unused; pass the variable through.
        kernel_size = 3
        padding = rate  # keeps spatial size for a dilated 3x3 kernel
        self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, kernel_size,
                                                       stride=1, dilation=rate, padding=padding)

    def forward(self, x):
        return self.atrous_convolution(x)
|
class ASPP_module_rate0(nn.Module):
    """ASPP branch for rate 1: a plain 1x1 convolution + BatchNorm + ReLU.
    Any other rate is rejected (those branches use ``ASPP_module``)."""

    def __init__(self, inplanes, planes, rate=1):
        super(ASPP_module_rate0, self).__init__()
        if rate != 1:
            raise RuntimeError()
        # 1x1 conv, no padding needed; dilation==1 here by construction.
        self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=1,
                                            stride=1, padding=0, dilation=rate, bias=False)
        self.bn = nn.BatchNorm2d(planes, eps=1e-05, affine=True)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.atrous_convolution(x)))
|
class SeparableConv2d_same(nn.Module):
    """Separable convolution with dynamic TF-'SAME' padding and BatchNorm
    after each stage; no activation inside the module."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
        super(SeparableConv2d_same, self).__init__()
        self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding,
                                   dilation, groups=inplanes, bias=bias)
        self.depthwise_bn = nn.BatchNorm2d(inplanes)
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = nn.BatchNorm2d(planes)

    def forward(self, x):
        # Pad on the fly so the output size follows TF 'SAME' semantics.
        out = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
        out = self.depthwise_bn(self.depthwise(out))
        return self.pointwise_bn(self.pointwise(out))
|
class Block(nn.Module):
    """Xception residual block: a stack of ``reps`` separable convolutions
    whose output is summed with a (possibly 1x1-projected) skip connection.

    ``grow_first`` widens channels on the first conv instead of the last;
    ``start_with_relu=False`` drops the leading ReLU; ``stride != 1`` appends
    a strided conv at the end; ``is_last`` appends one extra stride-1 conv.
    """

    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()
        if ((planes != inplanes) or (stride != 1)):
            # Projection shortcut.  NOTE(review): the skip conv hard-codes
            # stride=2 regardless of the `stride` argument (replaced with a
            # stride-1 conv when is_last) -- confirm this matches the main
            # path's stride at every call site.
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=2, bias=False)
            if is_last:
                self.skip = nn.Conv2d(inplanes, planes, 1, stride=1, bias=False)
            self.skipbn = nn.BatchNorm2d(planes)
        else:
            # Identity shortcut.
            self.skip = None
        self.relu = nn.ReLU(inplace=True)
        rep = []
        filters = inplanes
        if grow_first:
            # Widen to `planes` with the first separable conv.
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
            filters = planes
        for i in range((reps - 1)):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
        if (not grow_first):
            # Widen on the last separable conv instead of the first.
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
        if (not start_with_relu):
            # Drop the leading ReLU appended above.
            rep = rep[1:]
        if (stride != 1):
            # Downsample with a strided separable conv at the end.
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=2, dilation=dilation))
        if is_last:
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1, dilation=dilation))
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        # Main path, then residual addition with the (projected) input.
        x = self.rep(inp)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x += skip
        return x
|
class Block2(nn.Module):
    """Variant of ``Block`` that also returns the feature map taken *before*
    the final strided conv (used as the decoder's low-level feature).

    NOTE(review): ``self.block2_lastconv`` is only created when
    ``stride != 1``, but ``forward`` always calls it -- constructing this
    block with stride=1 would raise AttributeError.  The call site in this
    file uses stride=2.
    """

    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
        super(Block2, self).__init__()
        if ((planes != inplanes) or (stride != 1)):
            # Projection shortcut (here the stride argument is honoured,
            # unlike in Block).
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = nn.BatchNorm2d(planes)
        else:
            self.skip = None
        self.relu = nn.ReLU(inplace=True)
        rep = []
        filters = inplanes
        if grow_first:
            # Widen to `planes` with the first separable conv.
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
            filters = planes
        for i in range((reps - 1)):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
        if (not grow_first):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
        if (not start_with_relu):
            rep = rep[1:]
        if (stride != 1):
            # Final downsampling conv kept separate so forward can tap the
            # pre-stride activation as the low-level feature.
            self.block2_lastconv = nn.Sequential(*[self.relu, SeparableConv2d_same(planes, planes, 3, stride=2, dilation=dilation)])
        if is_last:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
        self.rep = nn.Sequential(*rep)

    def forward(self, inp):
        x = self.rep(inp)
        # Copy of the pre-downsampling activation, returned to the caller.
        low_middle = x.clone()
        x1 = x
        x1 = self.block2_lastconv(x1)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x1 += skip
        return (x1, low_middle)
|
class Xception(nn.Module):
    """Modified Aligned Xception backbone for DeepLabv3+.

    ``os`` is the output stride (16 or 8): it decides where striding stops
    and which dilation rates the middle/exit blocks use.  ``forward``
    returns ``(x, low_level_feat)`` where ``x`` is the final 2048-channel
    feature map and ``low_level_feat`` is tapped from inside ``block2``.
    """

    def __init__(self, inplanes=3, os=16, pretrained=False):
        super(Xception, self).__init__()
        # Stride/dilation schedule derived from the requested output stride.
        if (os == 16):
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
        elif (os == 8):
            entry_block3_stride = 1
            middle_block_rate = 2
            exit_block_rates = (2, 4)
        else:
            raise NotImplementedError
        # --- entry flow -------------------------------------------------
        self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
        # Block2 additionally exposes the low-level feature map.
        self.block2 = Block2(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
        self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True)
        # --- middle flow: 16 identical 728-channel residual blocks ------
        self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        # --- exit flow --------------------------------------------------
        self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_rates[0], start_with_relu=True, grow_first=False, is_last=True)
        self.conv3 = SeparableConv2d_aspp(1024, 1536, 3, stride=1, dilation=exit_block_rates[1], padding=exit_block_rates[1])
        self.conv4 = SeparableConv2d_aspp(1536, 1536, 3, stride=1, dilation=exit_block_rates[1], padding=exit_block_rates[1])
        self.conv5 = SeparableConv2d_aspp(1536, 2048, 3, stride=1, dilation=exit_block_rates[1], padding=exit_block_rates[1])
        if pretrained:
            self.__load_xception_pretrained()

    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.block1(x)
        # block2 also yields the low-level feature for the decoder.
        (x, low_level_feat) = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.block13(x)
        x = self.block14(x)
        x = self.block15(x)
        x = self.block16(x)
        x = self.block17(x)
        x = self.block18(x)
        x = self.block19(x)
        x = self.block20(x)
        x = self.conv3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.relu(x)
        return (x, low_level_feat)

    def __init_weight(self):
        # Kaiming init for convs, unit/zero init for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def __load_xception_pretrained(self):
        # Load the original 12-block Xception checkpoint and remap its keys
        # onto this deeper (20-block) variant: the last pretrained block maps
        # to block20, and block11's weights seed blocks 12-19.
        pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
        model_dict = {}
        state_dict = self.state_dict()
        for (k, v) in pretrain_dict.items():
            if (k in state_dict):
                if ('pointwise' in k):
                    # Pretrained pointwise weights are stored as 2-D; add the
                    # spatial 1x1 dims expected by nn.Conv2d.
                    v = v.unsqueeze((- 1)).unsqueeze((- 1))
                if k.startswith('block12'):
                    model_dict[k.replace('block12', 'block20')] = v
                elif k.startswith('block11'):
                    model_dict[k.replace('block11', 'block12')] = v
                    model_dict[k.replace('block11', 'block13')] = v
                    model_dict[k.replace('block11', 'block14')] = v
                    model_dict[k.replace('block11', 'block15')] = v
                    model_dict[k.replace('block11', 'block16')] = v
                    model_dict[k.replace('block11', 'block17')] = v
                    model_dict[k.replace('block11', 'block18')] = v
                    model_dict[k.replace('block11', 'block19')] = v
                elif k.startswith('conv3'):
                    model_dict[k] = v
                elif k.startswith('bn3'):
                    model_dict[k] = v
                    model_dict[k.replace('bn3', 'bn4')] = v
                elif k.startswith('conv4'):
                    model_dict[k.replace('conv4', 'conv5')] = v
                elif k.startswith('bn4'):
                    model_dict[k.replace('bn4', 'bn5')] = v
                else:
                    model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
|
class DeepLabv3_plus(nn.Module):
    """DeepLabv3+ semantic segmentation model with an Xception backbone:
    ASPP over the backbone output, a 48-channel projection of the low-level
    feature, and a two-step separable-conv decoder.
    """

    def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True):
        if _print:
            print('Constructing DeepLabv3+ model...')
            print('Number of classes: {}'.format(n_classes))
            print('Output stride: {}'.format(os))
            print('Number of Input Channels: {}'.format(nInputChannels))
        super(DeepLabv3_plus, self).__init__()
        self.xception_features = Xception(nInputChannels, os, pretrained)
        # ASPP dilation rates by output stride.
        # NOTE(review): the os == 8 branch computes rates and then raises
        # NotImplementedError -- only os == 16 is usable as written.
        if (os == 16):
            rates = [1, 6, 12, 18]
        elif (os == 8):
            rates = [1, 12, 24, 36]
            raise NotImplementedError
        else:
            raise NotImplementedError
        # Four parallel ASPP branches (rate-1 branch is a plain 1x1 conv).
        self.aspp1 = ASPP_module_rate0(2048, 256, rate=rates[0])
        self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
        self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
        self.aspp4 = ASPP_module(2048, 256, rate=rates[3])
        self.relu = nn.ReLU()
        # Image-level pooling branch.
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(2048, 256, 1, stride=1, bias=False), nn.BatchNorm2d(256), nn.ReLU())
        # 1280 = 5 branches x 256 channels.
        self.concat_projection_conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.concat_projection_bn1 = nn.BatchNorm2d(256)
        # Low-level feature projection to 48 channels for the decoder.
        self.feature_projection_conv1 = nn.Conv2d(256, 48, 1, bias=False)
        self.feature_projection_bn1 = nn.BatchNorm2d(48)
        # Decoder input: 256 (ASPP) + 48 (low-level) = 304 channels.
        self.decoder = nn.Sequential(Decoder_module(304, 256), Decoder_module(256, 256))
        self.semantic = nn.Conv2d(256, n_classes, kernel_size=1, stride=1)

    def forward(self, input):
        (x, low_level_features) = self.xception_features(input)
        # ASPP: four dilated branches + global-pooling branch, concatenated.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        # Decoder: fuse with the projected low-level feature and refine.
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        x = self.semantic(x)
        # Final upsampling back to the input resolution.
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def freeze_bn(self):
        # Put backbone BatchNorm layers in eval mode (frozen running stats).
        for m in self.xception_features.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def freeze_totally_bn(self):
        # Freeze running stats of every BatchNorm layer in the model.
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def freeze_aspp_bn(self):
        # Freeze running stats of the BatchNorm layers inside the 4 ASPP branches.
        for m in self.aspp1.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        for m in self.aspp2.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        for m in self.aspp3.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        for m in self.aspp4.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def learnable_parameters(self):
        """Partition parameters into groups (by name) for per-group LRs."""
        layer_features_BN = []
        layer_features = []
        layer_aspp = []
        layer_projection = []
        layer_decoder = []
        layer_other = []
        model_para = list(self.named_parameters())
        for (name, para) in model_para:
            if ('xception' in name):
                if (('bn' in name) or ('downsample.1.weight' in name) or ('downsample.1.bias' in name)):
                    layer_features_BN.append(para)
                else:
                    layer_features.append(para)
            elif ('aspp' in name):
                layer_aspp.append(para)
            elif ('projection' in name):
                layer_projection.append(para)
            elif ('decode' in name):
                layer_decoder.append(para)
            elif ('global' not in name):
                # Everything else except the global-pooling branch.
                layer_other.append(para)
        return (layer_features_BN, layer_features, layer_aspp, layer_projection, layer_decoder, layer_other)

    def get_backbone_para(self):
        """Split parameters into (backbone, head) lists by name."""
        layer_features = []
        other_features = []
        model_para = list(self.named_parameters())
        for (name, para) in model_para:
            if ('xception' in name):
                layer_features.append(para)
            else:
                other_features.append(para)
        return (layer_features, other_features)

    def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False):
        """Set training mode, optionally keeping backbone BatchNorm frozen.

        With ``freeze_bn`` the backbone's BatchNorm layers stay in eval mode
        (fixed running stats); with ``freeze_bn_affine`` their weight/bias
        additionally stop receiving gradients.

        Returns:
            Module: self (via nn.Module.train).
        """
        super(DeepLabv3_plus, self).train(mode)
        if freeze_bn:
            print('Freezing Mean/Var of BatchNorm2D.')
            if freeze_bn_affine:
                print('Freezing Weight/Bias of BatchNorm2D.')
        if freeze_bn:
            for m in self.xception_features.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if freeze_bn_affine:
                        m.weight.requires_grad = False
                        m.bias.requires_grad = False

    def __init_weight(self):
        # He-style init for convs; unit/zero init for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def load_state_dict_new(self, state_dict):
        """Best-effort state-dict load: strips 'module.' prefixes, skips
        unknown keys, and reports shape mismatches and missing keys instead
        of raising.

        NOTE(review): on a successful try-block copy the parameter is copied
        a second time below (harmless but redundant), and ``new_state_dict``
        only tracks which checkpoint keys were seen (values are dummy 0s).
        """
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    # BatchNorm num_batches_tracked buffers: silently skip.
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # Shape mismatch (or other copy failure): report and skip.
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
|
def get_1x_lr_params(model):
    """Yield the trainable backbone parameters (the Xception features).

    These are trained at the base learning rate, in contrast with
    `get_10x_lr_params`, which yields the classification-head parameters.
    Parameters with ``requires_grad == False`` are skipped.
    """
    for module in (model.xception_features,):
        for param in module.parameters():
            if param.requires_grad:
                yield param
|
def get_10x_lr_params(model):
    """Yield the trainable head parameters (ASPP branches and the final
    convolutions), which are trained at 10x the base learning rate.
    Parameters with ``requires_grad == False`` are skipped.
    """
    heads = (model.aspp1, model.aspp2, model.aspp3, model.aspp4,
             model.conv1, model.conv2, model.last_conv)
    for module in heads:
        for param in module.parameters():
            if param.requires_grad:
                yield param
|
def fixed_padding(inputs, kernel_size, rate):
    """Symmetric 'SAME'-style padding for a dilated square kernel.

    The effective receptive span of a dilated kernel is
    ``(kernel_size - 1) * rate + 1``; the required total padding is split
    between the two sides, with the odd pixel going to the bottom/right.
    """
    span = ((kernel_size - 1) * rate) + 1  # effective kernel extent
    pad_front = (span - 1) // 2
    pad_back = (span - 1) - pad_front
    return F.pad(inputs, (pad_front, pad_back, pad_front, pad_back))
|
class SeparableConv2d_aspp(nn.Module):
    """Separable convolution with SynchronizedBatchNorm2d + ReLU after both
    the depthwise and the pointwise stage (multi-GPU ASPP variant)."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
        super(SeparableConv2d_aspp, self).__init__()
        # Depthwise (grouped) spatial convolution + synchronized BN.
        self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding,
                                   dilation, groups=inplanes, bias=bias)
        self.depthwise_bn = SynchronizedBatchNorm2d(inplanes)
        # Pointwise 1x1 channel mixer + synchronized BN.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = SynchronizedBatchNorm2d(planes)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.depthwise_bn(self.depthwise(x)))
        return self.relu(self.pointwise_bn(self.pointwise(out)))
|
class Decoder_module(nn.Module):
    """Decoder refinement step built on the synchronized-BN separable conv:
    one size-preserving 3x3 separable convolution (padding=1)."""

    def __init__(self, inplanes, planes, rate=1):
        super(Decoder_module, self).__init__()
        self.atrous_convolution = SeparableConv2d_aspp(
            inplanes, planes, 3, stride=1, dilation=rate, padding=1)

    def forward(self, x):
        return self.atrous_convolution(x)
|
class ASPP_module(nn.Module):
    """ASPP branch (synchronized-BN variant) for dilation rates > 1: a
    dilated 3x3 separable conv with 'same' padding (padding == rate).
    Rate 1 is handled by ``ASPP_module_rate0``, hence the guard.
    """

    def __init__(self, inplanes, planes, rate):
        super(ASPP_module, self).__init__()
        if (rate == 1):
            # A rate-1 branch is a plain 1x1 conv; use ASPP_module_rate0.
            raise RuntimeError()
        # Fix: the original assigned `kernel_size = 3` but then passed the
        # literal 3, leaving the local unused; pass the variable through.
        kernel_size = 3
        padding = rate  # keeps spatial size for a dilated 3x3 kernel
        self.atrous_convolution = SeparableConv2d_aspp(inplanes, planes, kernel_size,
                                                       stride=1, dilation=rate, padding=padding)

    def forward(self, x):
        return self.atrous_convolution(x)
|
class ASPP_module_rate0(nn.Module):
    """ASPP branch for rate 1 (synchronized-BN variant): a plain 1x1
    convolution + SynchronizedBatchNorm2d + ReLU.  Other rates are rejected
    (those branches use ``ASPP_module``)."""

    def __init__(self, inplanes, planes, rate=1):
        super(ASPP_module_rate0, self).__init__()
        if rate != 1:
            raise RuntimeError()
        # 1x1 conv; dilation == 1 here by construction, so no padding needed.
        self.atrous_convolution = nn.Conv2d(inplanes, planes, kernel_size=1,
                                            stride=1, padding=0, dilation=rate, bias=False)
        self.bn = SynchronizedBatchNorm2d(planes, eps=1e-05, affine=True)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.atrous_convolution(x)))
|
class SeparableConv2d_same(nn.Module):
    """Depthwise-separable conv with 'same'-style padding applied in forward.

    Unlike ``SeparableConv2d_aspp`` there is no ReLU inside; the caller
    applies activation (see ``Block``).
    """

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
        super(SeparableConv2d_same, self).__init__()
        self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias)
        self.depthwise_bn = SynchronizedBatchNorm2d(inplanes)
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = SynchronizedBatchNorm2d(planes)

    def forward(self, x):
        # Explicit symmetric padding sized from the depthwise conv's own
        # kernel/dilation, so the output keeps 'same' semantics.
        padded = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
        out = self.depthwise_bn(self.depthwise(padded))
        return self.pointwise_bn(self.pointwise(out))
|
class Block(nn.Module):
    """Aligned-Xception residual block: a stack of separable convs + skip branch.

    Args:
        inplanes / planes: input / output channel counts.
        reps: number of separable-conv repetitions in the main branch.
        stride: when != 1, a trailing stride-2 separable conv downsamples.
        dilation: atrous rate for every separable conv in the block.
        start_with_relu: when False, the leading ReLU is dropped (used after
            the stem, which already ends in ReLU).
        grow_first: widen channels at the first conv (True) or the last (False).
        is_last: exit-flow variant; forces a stride-1 skip conv and appends
            one extra stride-1 separable conv.
    """
    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
        super(Block, self).__init__()
        if ((planes != inplanes) or (stride != 1)):
            # NOTE(review): the skip conv is hard-coded to stride=2 even when
            # `stride` is 1 (unless is_last overrides it below) — confirm this
            # matches how callers instantiate the block before changing it.
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=2, bias=False)
            if is_last:
                self.skip = nn.Conv2d(inplanes, planes, 1, stride=1, bias=False)
            self.skipbn = SynchronizedBatchNorm2d(planes)
        else:
            self.skip = None
        # Same (inplace) ReLU module instance is reused throughout the branch.
        self.relu = nn.ReLU(inplace=True)
        rep = []
        filters = inplanes
        if grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
            filters = planes
        for i in range((reps - 1)):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
        if (not grow_first):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
        if (not start_with_relu):
            # Drop the leading ReLU appended above.
            rep = rep[1:]
        if (stride != 1):
            # Downsampling conv at the end of the main branch.
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=2, dilation=dilation))
        if is_last:
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1, dilation=dilation))
        self.rep = nn.Sequential(*rep)
    def forward(self, inp):
        x = self.rep(inp)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        # Residual addition (in place).
        x += skip
        return x
|
class Block2(nn.Module):
    """Variant of ``Block`` that also returns its pre-downsampling features.

    Used as the backbone's ``block2`` so the decoder can tap the activation
    before the final stride-2 conv ("low-level features").

    NOTE(review): ``self.block2_lastconv`` is only defined when
    ``stride != 1``, but ``forward`` always calls it — constructing this
    block with ``stride == 1`` would raise AttributeError. Confirm whether
    stride-1 instantiation is ever intended.
    """
    def __init__(self, inplanes, planes, reps, stride=1, dilation=1, start_with_relu=True, grow_first=True, is_last=False):
        super(Block2, self).__init__()
        if ((planes != inplanes) or (stride != 1)):
            self.skip = nn.Conv2d(inplanes, planes, 1, stride=stride, bias=False)
            self.skipbn = SynchronizedBatchNorm2d(planes)
        else:
            self.skip = None
        # Same (inplace) ReLU module instance is reused throughout the branch.
        self.relu = nn.ReLU(inplace=True)
        rep = []
        filters = inplanes
        if grow_first:
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
            filters = planes
        for i in range((reps - 1)):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(filters, filters, 3, stride=1, dilation=dilation))
        if (not grow_first):
            rep.append(self.relu)
            rep.append(SeparableConv2d_same(inplanes, planes, 3, stride=1, dilation=dilation))
        if (not start_with_relu):
            # Drop the leading ReLU appended above.
            rep = rep[1:]
        if (stride != 1):
            # Downsampling step kept separate so forward can grab the
            # activation just before it.
            self.block2_lastconv = nn.Sequential(*[self.relu, SeparableConv2d_same(planes, planes, 3, stride=2, dilation=dilation)])
        if is_last:
            rep.append(SeparableConv2d_same(planes, planes, 3, stride=1))
        self.rep = nn.Sequential(*rep)
    def forward(self, inp):
        x = self.rep(inp)
        # Clone of the pre-downsampling activation, returned as low-level features.
        low_middle = x.clone()
        x1 = x
        x1 = self.block2_lastconv(x1)
        if (self.skip is not None):
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x1 += skip
        return (x1, low_middle)
|
class Xception(nn.Module):
    """Modified Aligned Xception backbone for DeepLabv3+.

    Returns a pair ``(x, low_level_feat)``: the final 2048-channel feature
    map and the low-level feature map tapped from ``block2``.

    Args:
        inplanes: number of input image channels.
        os: desired output stride (16 or 8); controls strides/dilations.
        pretrained: if True, load ImageNet weights from the Cadene model zoo.
    """
    def __init__(self, inplanes=3, os=16, pretrained=False):
        super(Xception, self).__init__()
        # Pick entry-flow stride and middle/exit-flow atrous rates so that
        # the overall output stride equals `os`.
        if (os == 16):
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
        elif (os == 8):
            entry_block3_stride = 1
            middle_block_rate = 2
            exit_block_rates = (2, 4)
        else:
            raise NotImplementedError
        # Entry flow stem.
        self.conv1 = nn.Conv2d(inplanes, 32, 3, stride=2, padding=1, bias=False)
        self.bn1 = SynchronizedBatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
        self.bn2 = SynchronizedBatchNorm2d(64)
        # Entry flow blocks; block2 also exposes low-level features.
        self.block1 = Block(64, 128, reps=2, stride=2, start_with_relu=False)
        self.block2 = Block2(128, 256, reps=2, stride=2, start_with_relu=True, grow_first=True)
        self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, start_with_relu=True, grow_first=True)
        # Middle flow: 16 identical residual blocks at 728 channels.
        self.block4 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block5 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block6 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block7 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block8 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block9 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block10 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block11 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block12 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block13 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block14 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block15 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block16 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block17 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block18 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        self.block19 = Block(728, 728, reps=3, stride=1, dilation=middle_block_rate, start_with_relu=True, grow_first=True)
        # Exit flow.
        self.block20 = Block(728, 1024, reps=2, stride=1, dilation=exit_block_rates[0], start_with_relu=True, grow_first=False, is_last=True)
        self.conv3 = SeparableConv2d_aspp(1024, 1536, 3, stride=1, dilation=exit_block_rates[1], padding=exit_block_rates[1])
        self.conv4 = SeparableConv2d_aspp(1536, 1536, 3, stride=1, dilation=exit_block_rates[1], padding=exit_block_rates[1])
        self.conv5 = SeparableConv2d_aspp(1536, 2048, 3, stride=1, dilation=exit_block_rates[1], padding=exit_block_rates[1])
        if pretrained:
            self.__load_xception_pretrained()
    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.block1(x)
        # block2 returns (features, low-level features for the decoder).
        (x, low_level_feat) = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.block13(x)
        x = self.block14(x)
        x = self.block15(x)
        x = self.block16(x)
        x = self.block17(x)
        x = self.block18(x)
        x = self.block19(x)
        x = self.block20(x)
        x = self.conv3(x)
        x = self.relu(x)
        x = self.conv4(x)
        x = self.relu(x)
        x = self.conv5(x)
        x = self.relu(x)
        return (x, low_level_feat)
    def __init_weight(self):
        # Kaiming init for convs; unit-gamma / zero-beta for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def __load_xception_pretrained(self):
        """Load ImageNet Xception weights, remapping the 12-block original
        onto this 20-block variant (middle blocks 12-19 reuse block11's
        weights; the exit block20 reuses block12's)."""
        pretrain_dict = model_zoo.load_url('http://data.lip6.fr/cadene/pretrainedmodels/xception-b5690688.pth')
        model_dict = {}
        state_dict = self.state_dict()
        for (k, v) in pretrain_dict.items():
            if (k in state_dict):
                if ('pointwise' in k):
                    # Checkpoint stores pointwise weights as 2-D; expand to
                    # the 4-D (out, in, 1, 1) shape nn.Conv2d expects.
                    v = v.unsqueeze((- 1)).unsqueeze((- 1))
                if k.startswith('block12'):
                    model_dict[k.replace('block12', 'block20')] = v
                elif k.startswith('block11'):
                    # Replicate block11's weights over the extra middle blocks.
                    model_dict[k.replace('block11', 'block12')] = v
                    model_dict[k.replace('block11', 'block13')] = v
                    model_dict[k.replace('block11', 'block14')] = v
                    model_dict[k.replace('block11', 'block15')] = v
                    model_dict[k.replace('block11', 'block16')] = v
                    model_dict[k.replace('block11', 'block17')] = v
                    model_dict[k.replace('block11', 'block18')] = v
                    model_dict[k.replace('block11', 'block19')] = v
                elif k.startswith('conv3'):
                    model_dict[k] = v
                elif k.startswith('bn3'):
                    model_dict[k] = v
                    model_dict[k.replace('bn3', 'bn4')] = v
                elif k.startswith('conv4'):
                    model_dict[k.replace('conv4', 'conv5')] = v
                elif k.startswith('bn4'):
                    model_dict[k.replace('bn4', 'bn5')] = v
                else:
                    model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
|
class DeepLabv3_plus(nn.Module):
    """DeepLabv3+ with an Aligned-Xception backbone.

    Pipeline: backbone -> ASPP (4 atrous branches + image-level pooling) ->
    1x1 projection -> upsample -> concat with projected low-level features ->
    two-step decoder -> 1x1 ``semantic`` classifier -> upsample to input size.

    Args:
        nInputChannels: number of input image channels.
        n_classes: number of output classes.
        os: backbone output stride (16 or 8).
        pretrained: load ImageNet weights into the backbone.
        _print: log the configuration at construction time.
    """
    def __init__(self, nInputChannels=3, n_classes=21, os=16, pretrained=False, _print=True):
        if _print:
            print('Constructing DeepLabv3+ model...')
            print('Number of classes: {}'.format(n_classes))
            print('Output stride: {}'.format(os))
            print('Number of Input Channels: {}'.format(nInputChannels))
        super(DeepLabv3_plus, self).__init__()
        self.xception_features = Xception(nInputChannels, os, pretrained)
        # Atrous rates per output stride (DeepLabv3+ convention).
        if (os == 16):
            rates = [1, 6, 12, 18]
        elif (os == 8):
            rates = [1, 12, 24, 36]
        else:
            raise NotImplementedError
        self.aspp1 = ASPP_module_rate0(2048, 256, rate=rates[0])
        self.aspp2 = ASPP_module(2048, 256, rate=rates[1])
        self.aspp3 = ASPP_module(2048, 256, rate=rates[2])
        self.aspp4 = ASPP_module(2048, 256, rate=rates[3])
        self.relu = nn.ReLU()
        # Image-level context branch of ASPP.
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Conv2d(2048, 256, 1, stride=1, bias=False), SynchronizedBatchNorm2d(256), nn.ReLU())
        # 5 x 256 = 1280 concatenated ASPP channels -> 256.
        self.concat_projection_conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.concat_projection_bn1 = SynchronizedBatchNorm2d(256)
        # Low-level feature projection: 256 -> 48 channels.
        self.feature_projection_conv1 = nn.Conv2d(256, 48, 1, bias=False)
        self.feature_projection_bn1 = SynchronizedBatchNorm2d(48)
        # Decoder input: 256 + 48 = 304 channels.
        self.decoder = nn.Sequential(Decoder_module(304, 256), Decoder_module(256, 256))
        self.semantic = nn.Conv2d(256, n_classes, kernel_size=1, stride=1)
    def forward(self, input):
        (x, low_level_features) = self.xception_features(input)
        # ASPP: four parallel branches plus pooled image-level context.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # NOTE: F.upsample is deprecated in newer PyTorch (F.interpolate).
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        # Upsample to low-level resolution, fuse, decode, classify.
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
    def freeze_bn(self):
        # Put all backbone BN layers in eval mode (frozen statistics).
        for m in self.xception_features.modules():
            if (isinstance(m, nn.BatchNorm2d) or isinstance(m, SynchronizedBatchNorm2d)):
                m.eval()
    def freeze_aspp_bn(self):
        # Freeze BN statistics in the four ASPP branches only.
        for m in self.aspp1.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        for m in self.aspp2.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        for m in self.aspp3.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        for m in self.aspp4.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
    def learnable_parameters(self):
        """Partition parameters into groups (by name) for per-group
        optimizer settings: backbone BN, backbone, ASPP, projections,
        decoder, and everything else."""
        layer_features_BN = []
        layer_features = []
        layer_aspp = []
        layer_projection = []
        layer_decoder = []
        layer_other = []
        model_para = list(self.named_parameters())
        for (name, para) in model_para:
            if ('xception' in name):
                if (('bn' in name) or ('downsample.1.weight' in name) or ('downsample.1.bias' in name)):
                    layer_features_BN.append(para)
                else:
                    layer_features.append(para)
            elif ('aspp' in name):
                layer_aspp.append(para)
            elif ('projection' in name):
                layer_projection.append(para)
            elif ('decode' in name):
                layer_decoder.append(para)
            else:
                layer_other.append(para)
        return (layer_features_BN, layer_features, layer_aspp, layer_projection, layer_decoder, layer_other)
    def __init_weight(self):
        # He-style init for convs; unit-gamma / zero-beta for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def load_state_dict_new(self, state_dict):
        """Tolerant checkpoint loading: strips 'module.' (DataParallel)
        prefixes, reports unexpected and missing keys, and skips
        size-mismatched tensors instead of raising."""
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            # Value is unused; the dict serves as a key-set for the
            # missing-key report below.
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; mismatches are reported and skipped.
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            # NOTE(review): redundant second copy (already copied above).
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
|
def get_1x_lr_params(model):
    """Yield the backbone's trainable parameters (low-learning-rate group).

    Skips any parameter with ``requires_grad == False`` (e.g. frozen BN).
    """
    modules = [model.xception_features]
    for module in modules:
        for param in module.parameters():
            if param.requires_grad:
                (yield param)
|
def get_10x_lr_params(model):
    """Yield trainable parameters of the classification head modules
    (high-learning-rate group): the four ASPP branches plus conv1, conv2
    and the final conv. Skips parameters with ``requires_grad == False``.
    """
    heads = [model.aspp1, model.aspp2, model.aspp3, model.aspp4, model.conv1, model.conv2, model.last_conv]
    for head in heads:
        for param in head.parameters():
            if param.requires_grad:
                (yield param)
|
class deeplab_xception_transfer_basemodel(deeplab_xception.DeepLabv3_plus):
    """DeepLabv3+ extended with a target-domain graph-reasoning branch.

    After the DeepLab decoder, features are projected onto ``n_classes``
    graph nodes, refined by three graph convolutions (driven by the
    ``adj1_target`` adjacency), projected back to a feature map, and fused
    with a skip-connected copy of the decoder features before the final
    ``semantic`` classifier.
    """
    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256):
        super(deeplab_xception_transfer_basemodel, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)
        # Feature map -> graph nodes (one node per target class).
        self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # Graph nodes -> feature map back-projection.
        self.target_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
    def load_source_model(self, state_dict):
        """Load a source-domain checkpoint, renaming its plain graph modules
        into the ``source_*`` namespace; reports unexpected/missing keys and
        skips size-mismatched tensors."""
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            # Rename generic graph weights to the source branch, leaving
            # target/fc/transpose modules untouched.
            if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_graph' not in name) and ('transpose_graph' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            # Value unused; dict acts as a key-set for the missing-key report.
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; mismatches reported and skipped.
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            # NOTE(review): redundant second copy (already copied above).
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
    def get_target_parameter(self):
        # Returns (target-branch + semantic params, all other params).
        l = []
        other = []
        for (name, k) in self.named_parameters():
            if (('target' in name) or ('semantic' in name)):
                l.append(k)
            else:
                other.append(k)
        return (l, other)
    def get_semantic_parameter(self):
        # Parameters of the final classifier only.
        l = []
        for (name, k) in self.named_parameters():
            if ('semantic' in name):
                l.append(k)
        return l
    def get_source_parameter(self):
        # Parameters of the source graph branch only.
        l = []
        for (name, k) in self.named_parameters():
            if ('source' in name):
                l.append(k)
        return l
    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        # adj2_source / adj3_transfer are unused here; kept so subclasses
        # share a uniform signature.
        (x, low_level_features) = self.xception_features(input)
        # Standard DeepLabv3+ head: ASPP + pooled context.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Graph reasoning over decoder features.
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        # Fuse skip-connected features with the back-projected graph features.
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
|
class deeplab_xception_transfer_basemodel_savememory(deeplab_xception.DeepLabv3_plus):
    """Memory-saving variant of the graph-reasoning base model.

    Identical to ``deeplab_xception_transfer_basemodel`` except that the
    graph-to-featuremap back-projection uses
    ``gcn.Graph_to_Featuremaps_savemem``.
    """
    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256):
        super(deeplab_xception_transfer_basemodel_savememory, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)
        # Feature map -> graph nodes (one node per target class).
        self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # Memory-saving graph -> feature map back-projection.
        self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
    def load_source_model(self, state_dict):
        """Load a source-domain checkpoint, renaming its plain graph modules
        into the ``source_*`` namespace; reports unexpected/missing keys."""
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_graph' not in name) and ('transpose_graph' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            # Value unused; dict acts as a key-set for the missing-key report.
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; mismatches reported and skipped.
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            # NOTE(review): redundant second copy (already copied above).
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
    def get_target_parameter(self):
        # Returns (target-branch + semantic params, all other params).
        l = []
        other = []
        for (name, k) in self.named_parameters():
            if (('target' in name) or ('semantic' in name)):
                l.append(k)
            else:
                other.append(k)
        return (l, other)
    def get_semantic_parameter(self):
        # Parameters of the final classifier only.
        l = []
        for (name, k) in self.named_parameters():
            if ('semantic' in name):
                l.append(k)
        return l
    def get_source_parameter(self):
        # Parameters of the source graph branch only.
        l = []
        for (name, k) in self.named_parameters():
            if ('source' in name):
                l.append(k)
        return l
    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        # adj2_source / adj3_transfer unused here; kept for a uniform signature.
        (x, low_level_features) = self.xception_features(input)
        # Standard DeepLabv3+ head: ASPP + pooled context.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Graph reasoning over decoder features.
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        # Fuse skip-connected features with the back-projected graph features.
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
|
class deeplab_xception_transfer_basemodel_synBN(deeplab_xception_synBN.DeepLabv3_plus):
    """Graph-reasoning base model on top of the synchronized-BN DeepLabv3+.

    Same structure as ``deeplab_xception_transfer_basemodel`` but inherits
    from ``deeplab_xception_synBN.DeepLabv3_plus``. Note its
    ``load_source_model`` rename filter does NOT exclude ``fc_graph`` /
    ``transpose_graph`` keys, unlike the non-synBN variants.
    """
    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256):
        super(deeplab_xception_transfer_basemodel_synBN, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)
        # Feature map -> graph nodes (one node per target class).
        self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # Graph nodes -> feature map back-projection.
        self.target_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
    def load_source_model(self, state_dict):
        """Load a source-domain checkpoint, renaming its plain graph modules
        into the ``source_*`` namespace; reports unexpected/missing keys."""
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            if (('graph' in name) and ('source' not in name) and ('target' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            # Value unused; dict acts as a key-set for the missing-key report.
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; mismatches reported and skipped.
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            # NOTE(review): redundant second copy (already copied above).
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
    def get_target_parameter(self):
        # Returns (target-branch + semantic params, all other params).
        l = []
        other = []
        for (name, k) in self.named_parameters():
            if (('target' in name) or ('semantic' in name)):
                l.append(k)
            else:
                other.append(k)
        return (l, other)
    def get_semantic_parameter(self):
        # Parameters of the final classifier only.
        l = []
        for (name, k) in self.named_parameters():
            if ('semantic' in name):
                l.append(k)
        return l
    def get_source_parameter(self):
        # Parameters of the source graph branch only.
        l = []
        for (name, k) in self.named_parameters():
            if ('source' in name):
                l.append(k)
        return l
    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        # adj2_source / adj3_transfer unused here; kept for a uniform signature.
        (x, low_level_features) = self.xception_features(input)
        # Standard DeepLabv3+ head: ASPP + pooled context.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Graph reasoning over decoder features.
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        # Fuse skip-connected features with the back-projected graph features.
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
|
class deeplab_xception_transfer_basemodel_synBN_savememory(deeplab_xception_synBN.DeepLabv3_plus):
    """Synchronized-BN, memory-saving variant of the graph-reasoning model.

    Differences from the other base models: inherits from the synBN
    DeepLabv3+, uses ``gcn.Graph_to_Featuremaps_savemem``, and its skip conv
    additionally contains a ``nn.BatchNorm2d``. Its ``load_source_model``
    rename filter does NOT exclude ``fc_graph`` / ``transpose_graph`` keys.
    """
    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256):
        super(deeplab_xception_transfer_basemodel_synBN_savememory, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)
        # Feature map -> graph nodes (one node per target class).
        self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # Memory-saving graph -> feature map back-projection.
        self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
        # Skip conv here includes BatchNorm (unlike the non-savemem variants).
        self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.BatchNorm2d(input_channels), nn.ReLU(True)])
    def load_source_model(self, state_dict):
        """Load a source-domain checkpoint, renaming its plain graph modules
        into the ``source_*`` namespace; reports unexpected/missing keys."""
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            if (('graph' in name) and ('source' not in name) and ('target' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            # Value unused; dict acts as a key-set for the missing-key report.
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; mismatches reported and skipped.
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            # NOTE(review): redundant second copy (already copied above).
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
    def get_target_parameter(self):
        # Returns (target-branch + semantic params, all other params).
        l = []
        other = []
        for (name, k) in self.named_parameters():
            if (('target' in name) or ('semantic' in name)):
                l.append(k)
            else:
                other.append(k)
        return (l, other)
    def get_semantic_parameter(self):
        # Parameters of the final classifier only.
        l = []
        for (name, k) in self.named_parameters():
            if ('semantic' in name):
                l.append(k)
        return l
    def get_source_parameter(self):
        # Parameters of the source graph branch only.
        l = []
        for (name, k) in self.named_parameters():
            if ('source' in name):
                l.append(k)
        return l
    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        # adj2_source / adj3_transfer unused here; kept for a uniform signature.
        (x, low_level_features) = self.xception_features(input)
        # Standard DeepLabv3+ head: ASPP + pooled context.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Graph reasoning over decoder features.
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        # Fuse skip-connected features with the back-projected graph features.
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
|
class deeplab_xception_transfer_projection(deeplab_xception_transfer_basemodel):
    """Cross-domain transfer model: adds a source graph branch whose node
    features are projected onto the target graph at every reasoning step.

    Two projection paths are combined per step: (1) learned transfer via
    ``transpose_graph`` with the ``adj3_transfer`` adjacency, and
    (2) cosine-similarity attention via ``similarity_trans``. The target
    node features, plus both projections, are fused by ``fc_graph``
    (hence its ``hidden_layers * 3`` input width).
    """
    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, transfer_graph=None, source_classes=20):
        super(deeplab_xception_transfer_projection, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os, input_channels=input_channels, hidden_layers=hidden_layers, out_channels=out_channels)
        # Source-domain graph branch (one node per source class).
        self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=source_classes)
        self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # Maps source-class nodes onto target-class nodes; the SAME module is
        # applied at all three reasoning steps (shared weights).
        self.transpose_graph = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=n_classes)
        self.fc_graph = gcn.GraphConvolution((hidden_layers * 3), hidden_layers)
    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        (x, low_level_features) = self.xception_features(input)
        # Standard DeepLabv3+ head: ASPP + pooled context.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Source graph reasoning.
        source_graph = self.source_featuremap_2_graph(x)
        source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
        source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True)
        # NOTE(review): conv2 is reused here; source_graph_conv3 is defined
        # but never used — likely a typo. Left as-is because released
        # checkpoints were trained with this wiring (conv3 got no gradient).
        source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True)
        # Learned source->target projections (shared transpose_graph module).
        source_2_target_graph1_v5 = self.transpose_graph.forward(source_graph1, adj=adj3_transfer, relu=True)
        source_2_target_graph2_v5 = self.transpose_graph.forward(source_graph2, adj=adj3_transfer, relu=True)
        source_2_target_graph3_v5 = self.transpose_graph.forward(source_graph3, adj=adj3_transfer, relu=True)
        # Target graph reasoning with per-step source fusion.
        graph = self.target_featuremap_2_graph(x)
        source_2_target_graph1 = self.similarity_trans(source_graph1, graph)
        graph = torch.cat((graph, source_2_target_graph1.squeeze(0), source_2_target_graph1_v5.squeeze(0)), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        source_2_target_graph2 = self.similarity_trans(source_graph2, graph)
        graph = torch.cat((graph, source_2_target_graph2, source_2_target_graph2_v5), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        source_2_target_graph3 = self.similarity_trans(source_graph3, graph)
        graph = torch.cat((graph, source_2_target_graph3, source_2_target_graph3_v5), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        # Fuse skip-connected features with the back-projected graph features.
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
    def similarity_trans(self, source, target):
        """Project source node features onto target nodes via
        cosine-similarity attention (softmax over source nodes)."""
        sim = torch.matmul(F.normalize(target, p=2, dim=(- 1)), F.normalize(source, p=2, dim=(- 1)).transpose((- 1), (- 2)))
        sim = F.softmax(sim, dim=(- 1))
        return torch.matmul(sim, source)
    def load_state_dict_new(self, state_dict):
        """(Named ``load_source_model`` in siblings.) Load a source-domain
        checkpoint, renaming plain graph modules into the ``source_*``
        namespace while leaving fc_/transpose_graph keys untouched."""
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_' not in name) and ('transpose_graph' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            # Value unused; dict acts as a key-set for the missing-key report.
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; mismatches reported and skipped.
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            # NOTE(review): redundant second copy (already copied above).
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
|
class deeplab_xception_transfer_projection_savemem(deeplab_xception_transfer_basemodel_savememory):
    """DeepLabv3+ with cross-domain graph-transfer reasoning (memory-saving variant).

    Adds a source-domain graph branch plus a learned source->target graph
    projection on top of the base model, so source-domain structure can guide
    target parsing.
    NOTE(review): the base class name is (re)defined later in this file —
    presumably an earlier definition exists above this chunk; confirm
    import-time ordering.
    """

    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, transfer_graph=None, source_classes=20):
        super(deeplab_xception_transfer_projection_savemem, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os, input_channels=input_channels, hidden_layers=hidden_layers, out_channels=out_channels)
        # Source-domain branch: project feature maps onto source_classes graph nodes.
        self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=source_classes)
        self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # NOTE(review): conv3 is registered but forward() reuses conv2 for the
        # third graph hop — verify against released weights before changing.
        self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # Transfers source node features onto the target node set via transfer_graph.
        self.transpose_graph = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=n_classes)
        # Fuses [target | similarity-transferred | adjacency-transferred] node features.
        self.fc_graph = gcn.GraphConvolution((hidden_layers * 3), hidden_layers)

    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        """Segment *input*, returning per-pixel logits at input resolution.

        adj1_target: target-graph adjacency; adj2_source: source-graph
        adjacency; adj3_transfer: source->target transfer adjacency.
        """
        # --- DeepLabv3+ encoder with ASPP ---
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # NOTE: F.upsample is deprecated in favour of F.interpolate.
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        # --- decoder: merge with projected low-level features ---
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # --- source graph reasoning (three hops; conv2 reused for hop 3, see __init__) ---
        source_graph = self.source_featuremap_2_graph(x)
        source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
        source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True)
        source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True)
        # Adjacency-based source->target projections of each hop's node features.
        source_2_target_graph1_v5 = self.transpose_graph.forward(source_graph1, adj=adj3_transfer, relu=True)
        source_2_target_graph2_v5 = self.transpose_graph.forward(source_graph2, adj=adj3_transfer, relu=True)
        source_2_target_graph3_v5 = self.transpose_graph.forward(source_graph3, adj=adj3_transfer, relu=True)
        # --- target graph reasoning, fusing transferred source features at each hop ---
        graph = self.target_featuremap_2_graph(x)
        source_2_target_graph1 = self.similarity_trans(source_graph1, graph)
        graph = torch.cat((graph, source_2_target_graph1.squeeze(0), source_2_target_graph1_v5.squeeze(0)), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        source_2_target_graph2 = self.similarity_trans(source_graph2, graph)
        graph = torch.cat((graph, source_2_target_graph2, source_2_target_graph2_v5), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        source_2_target_graph3 = self.similarity_trans(source_graph3, graph)
        graph = torch.cat((graph, source_2_target_graph3, source_2_target_graph3_v5), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        # Project node features back onto the feature map and classify.
        graph = self.target_graph_2_fea.forward(graph, x)
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def similarity_trans(self, source, target):
        """Attention-style transfer: cosine similarity of target vs. source nodes,
        softmax over source nodes, then a weighted sum of source features."""
        sim = torch.matmul(F.normalize(target, p=2, dim=(- 1)), F.normalize(source, p=2, dim=(- 1)).transpose((- 1), (- 2)))
        sim = F.softmax(sim, dim=(- 1))
        return torch.matmul(sim, source)

    def load_source_model(self, state_dict):
        """Copy a source-domain checkpoint into this model, renaming generic
        graph keys to their source_* counterparts; mismatched or unknown keys
        are reported and skipped."""
        own_state = self.state_dict()
        # Records every (renamed) checkpoint key so missing model keys can be listed.
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_' not in name) and ('transpose_graph' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    # BatchNorm bookkeeping buffers can be dropped silently.
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                # Backwards compatibility for checkpoints that stored Parameters.
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; on success the copy below runs a
                # second time (redundant but harmless).
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
|
class deeplab_xception_transfer_projection_synBN_savemem(deeplab_xception_transfer_basemodel_synBN_savememory):
    """Synchronized-BatchNorm twin of deeplab_xception_transfer_projection_savemem.

    Identical graph-transfer architecture; only the base model (and hence the
    normalization layers) differs.
    """

    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, transfer_graph=None, source_classes=20):
        super(deeplab_xception_transfer_projection_synBN_savemem, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os, input_channels=input_channels, hidden_layers=hidden_layers, out_channels=out_channels)
        # Source-domain branch: project feature maps onto source_classes graph nodes.
        self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=source_classes)
        self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # NOTE(review): conv3 is registered but forward() reuses conv2 for hop 3.
        self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # Transfers source node features onto the target node set via transfer_graph.
        self.transpose_graph = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=n_classes)
        # Fuses [target | similarity-transferred | adjacency-transferred] node features.
        self.fc_graph = gcn.GraphConvolution((hidden_layers * 3), hidden_layers)

    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        """Segment *input*, returning per-pixel logits at input resolution.

        adj1_target: target-graph adjacency; adj2_source: source-graph
        adjacency; adj3_transfer: source->target transfer adjacency.
        """
        # --- DeepLabv3+ encoder with ASPP ---
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # NOTE: F.upsample is deprecated in favour of F.interpolate.
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        # --- decoder: merge with projected low-level features ---
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # --- source graph reasoning (three hops; conv2 reused for hop 3) ---
        source_graph = self.source_featuremap_2_graph(x)
        source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
        source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True)
        source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True)
        # Adjacency-based source->target projections of each hop's node features.
        source_2_target_graph1_v5 = self.transpose_graph.forward(source_graph1, adj=adj3_transfer, relu=True)
        source_2_target_graph2_v5 = self.transpose_graph.forward(source_graph2, adj=adj3_transfer, relu=True)
        source_2_target_graph3_v5 = self.transpose_graph.forward(source_graph3, adj=adj3_transfer, relu=True)
        # --- target graph reasoning, fusing transferred source features at each hop ---
        graph = self.target_featuremap_2_graph(x)
        source_2_target_graph1 = self.similarity_trans(source_graph1, graph)
        graph = torch.cat((graph, source_2_target_graph1.squeeze(0), source_2_target_graph1_v5.squeeze(0)), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        source_2_target_graph2 = self.similarity_trans(source_graph2, graph)
        graph = torch.cat((graph, source_2_target_graph2, source_2_target_graph2_v5), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        source_2_target_graph3 = self.similarity_trans(source_graph3, graph)
        graph = torch.cat((graph, source_2_target_graph3, source_2_target_graph3_v5), dim=(- 1))
        graph = self.fc_graph.forward(graph, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        # Project node features back onto the feature map and classify.
        graph = self.target_graph_2_fea.forward(graph, x)
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x

    def similarity_trans(self, source, target):
        """Attention-style transfer: cosine similarity of target vs. source nodes,
        softmax over source nodes, then a weighted sum of source features."""
        sim = torch.matmul(F.normalize(target, p=2, dim=(- 1)), F.normalize(source, p=2, dim=(- 1)).transpose((- 1), (- 2)))
        sim = F.softmax(sim, dim=(- 1))
        return torch.matmul(sim, source)

    def load_source_model(self, state_dict):
        """Copy a source-domain checkpoint into this model, renaming generic
        graph keys to their source_* counterparts; mismatched or unknown keys
        are reported and skipped."""
        own_state = self.state_dict()
        # Records every (renamed) checkpoint key so missing model keys can be listed.
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_' not in name) and ('transpose_graph' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    # BatchNorm bookkeeping buffers can be dropped silently.
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                # Backwards compatibility for checkpoints that stored Parameters.
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; on success the copy below runs a
                # second time (redundant but harmless).
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))
|
class deeplab_xception_transfer_basemodel_savememory(deeplab_xception.DeepLabv3_plus):
    """Base model for graph-transfer parsing (memory-saving variant).

    Adds checkpoint loading and parameter-grouping helpers on top of plain
    DeepLabv3+. NOTE(review): forward()/top_forward() reference source_*/
    target_* graph modules that are NOT created in this __init__ — they are
    presumably supplied by subclasses; confirm before instantiating this
    class directly.
    """

    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, source_classes=20, transfer_graph=None):
        # Graph-related arguments are accepted only for signature compatibility
        # with the subclasses; they are unused here.
        super(deeplab_xception_transfer_basemodel_savememory, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)

    def load_source_model(self, state_dict):
        """Copy a source-domain checkpoint into this model, renaming generic
        graph keys to their source_* counterparts; mismatched or unknown keys
        are reported and skipped."""
        own_state = self.state_dict()
        # Records every (renamed) checkpoint key so missing model keys can be listed.
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            # Slightly wider exclusion list than the projection classes
            # ('fc_graph', 'middle') — keep them in sync when editing.
            if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_graph' not in name) and ('transpose_graph' not in name) and ('middle' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    # BatchNorm bookkeeping buffers can be dropped silently.
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                # Backwards compatibility for checkpoints that stored Parameters.
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; on success the copy below runs a
                # second time (redundant but harmless).
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))

    def get_target_parameter(self):
        """Return (target-branch + semantic params, all other params) for
        optimizers with per-group learning rates."""
        l = []
        other = []
        for (name, k) in self.named_parameters():
            if (('target' in name) or ('semantic' in name)):
                l.append(k)
            else:
                other.append(k)
        return (l, other)

    def get_semantic_parameter(self):
        """Return only the parameters of the semantic classifier head(s)."""
        l = []
        for (name, k) in self.named_parameters():
            if ('semantic' in name):
                l.append(k)
        return l

    def get_source_parameter(self):
        """Return only the source-branch parameters."""
        l = []
        for (name, k) in self.named_parameters():
            if ('source' in name):
                l.append(k)
        return l

    def top_forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        """Encoder + graph reasoning without the pixel-level head.

        NOTE(review): this method computes the graphs but has no return
        statement (returns None) — it looks like an unfinished template that
        subclasses override; confirm it is not called for its result.
        """
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # NOTE: F.upsample is deprecated in favour of F.interpolate.
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Source graph hops (conv2 reused for hop 3 — see sibling classes).
        source_graph = self.source_featuremap_2_graph(x)
        source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
        source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True)
        source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True)
        # Target graph hops; results are discarded (no return).
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)

    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        """Segment *input* using only the target graph branch; returns logits
        at input resolution. adj2_source/adj3_transfer are unused here."""
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Target-only graph reasoning, then back to the feature map.
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
|
class deeplab_xception_transfer_basemodel_savememory_synbn(deeplab_xception_synBN.DeepLabv3_plus):
    """Synchronized-BatchNorm twin of deeplab_xception_transfer_basemodel_savememory.

    Same helpers and graph flow; only the DeepLabv3+ base (normalization
    layers) differs. NOTE(review): forward()/top_forward() reference source_*/
    target_* graph modules not created in this __init__ — presumably supplied
    by subclasses.
    """

    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256, source_classes=20, transfer_graph=None):
        # Graph-related arguments are accepted only for signature compatibility
        # with the subclasses; they are unused here.
        super(deeplab_xception_transfer_basemodel_savememory_synbn, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)

    def load_source_model(self, state_dict):
        """Copy a source-domain checkpoint into this model, renaming generic
        graph keys to their source_* counterparts; mismatched or unknown keys
        are reported and skipped."""
        own_state = self.state_dict()
        # Records every (renamed) checkpoint key so missing model keys can be listed.
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            name = name.replace('module.', '')
            if (('graph' in name) and ('source' not in name) and ('target' not in name) and ('fc_graph' not in name) and ('transpose_graph' not in name) and ('middle' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            new_state_dict[name] = 0
            if (name not in own_state):
                if ('num_batch' in name):
                    # BatchNorm bookkeeping buffers can be dropped silently.
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                # Backwards compatibility for checkpoints that stored Parameters.
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                # NOTE(review): bare except; on success the copy below runs a
                # second time (redundant but harmless).
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            own_state[name].copy_(param)
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))

    def get_target_parameter(self):
        """Return (target-branch + semantic params, all other params)."""
        l = []
        other = []
        for (name, k) in self.named_parameters():
            if (('target' in name) or ('semantic' in name)):
                l.append(k)
            else:
                other.append(k)
        return (l, other)

    def get_semantic_parameter(self):
        """Return only the parameters of the semantic classifier head(s)."""
        l = []
        for (name, k) in self.named_parameters():
            if ('semantic' in name):
                l.append(k)
        return l

    def get_source_parameter(self):
        """Return only the source-branch parameters."""
        l = []
        for (name, k) in self.named_parameters():
            if ('source' in name):
                l.append(k)
        return l

    def top_forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        """Encoder + graph reasoning without the pixel-level head.

        NOTE(review): no return statement (returns None) — apparently a
        template overridden by subclasses; confirm it is not used for its result.
        """
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # NOTE: F.upsample is deprecated in favour of F.interpolate.
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Source graph hops (conv2 reused for hop 3 — see sibling classes).
        source_graph = self.source_featuremap_2_graph(x)
        source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
        source_graph2 = self.source_graph_conv2.forward(source_graph1, adj=adj2_source, relu=True)
        source_graph3 = self.source_graph_conv2.forward(source_graph2, adj=adj2_source, relu=True)
        # Target graph hops; results are discarded (no return).
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)

    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        """Segment *input* using only the target graph branch; returns logits
        at input resolution. adj2_source/adj3_transfer are unused here."""
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Target-only graph reasoning, then back to the feature map.
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
|
class deeplab_xception_end2end_3d(deeplab_xception_transfer_basemodel_savememory):
def __init__(self, nInputChannels=3, n_classes=20, os=16, input_channels=256, hidden_layers=128, out_channels=256, source_classes=7, middle_classes=18, transfer_graph=None):
super(deeplab_xception_end2end_3d, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)
self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=source_classes)
self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.source_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=source_classes)
self.source_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
self.source_semantic = nn.Conv2d(out_channels, source_classes, 1)
self.middle_semantic = nn.Conv2d(out_channels, middle_classes, 1)
self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=n_classes)
self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
self.middle_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=middle_classes)
self.middle_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.middle_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.middle_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
self.middle_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
self.middle_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
self.transpose_graph_source2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=n_classes)
self.transpose_graph_target2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=n_classes, end_nodes=source_classes)
self.transpose_graph_middle2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=middle_classes, end_nodes=source_classes)
self.transpose_graph_middle2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=middle_classes, end_nodes=source_classes)
self.transpose_graph_source2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=middle_classes)
self.transpose_graph_target2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=n_classes, end_nodes=middle_classes)
self.fc_graph_source = gcn.GraphConvolution((hidden_layers * 5), hidden_layers)
self.fc_graph_target = gcn.GraphConvolution((hidden_layers * 5), hidden_layers)
self.fc_graph_middle = gcn.GraphConvolution((hidden_layers * 5), hidden_layers)
def freeze_totally_bn(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
m.weight.requires_grad = False
m.bias.requires_grad = False
def freeze_backbone_bn(self):
for m in self.xception_features.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
m.weight.requires_grad = False
m.bias.requires_grad = False
    def top_forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer_s2t=None, adj3_transfer_t2s=None, adj4_middle=None, adj5_transfer_s2m=None, adj6_transfer_t2m=None, adj5_transfer_m2s=None, adj6_transfer_m2t=None):
        """Shared encoder plus three rounds of cross-branch graph reasoning.

        Returns (source_graph3, target_graph3, middle_graph3, x) where x is the
        decoded feature map consumed by the bottom_forward_* heads.
        NOTE(review): all three hops reuse *_graph_conv1, and the middle branch
        uses target_graph_conv1 rather than middle_graph_conv1 — the conv2/
        conv3/middle modules registered in __init__ are never used here.
        Preserved as-is; confirm against released weights before changing.
        """
        # --- DeepLabv3+ encoder with ASPP ---
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        # NOTE: F.upsample is deprecated in favour of F.interpolate.
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # --- project features onto the three node sets ---
        source_graph = self.source_featuremap_2_graph(x)
        target_graph = self.target_featuremap_2_graph(x)
        middle_graph = self.middle_featuremap_2_graph(x)
        # ===== hop 1 =====
        source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
        target_graph1 = self.target_graph_conv1.forward(target_graph, adj=adj1_target, relu=True)
        middle_graph1 = self.target_graph_conv1.forward(middle_graph, adj=adj4_middle, relu=True)
        # Adjacency-based (v5) transfers between every pair of branches.
        source_2_target_graph1_v5 = self.transpose_graph_source2target.forward(source_graph1, adj=adj3_transfer_s2t, relu=True)
        source_2_middle_graph1_v5 = self.transpose_graph_source2middle.forward(source_graph1, adj=adj5_transfer_s2m, relu=True)
        target_2_source_graph1_v5 = self.transpose_graph_target2source.forward(target_graph1, adj=adj3_transfer_t2s, relu=True)
        target_2_middle_graph1_v5 = self.transpose_graph_target2middle.forward(target_graph1, adj=adj6_transfer_t2m, relu=True)
        middle_2_source_graph1_v5 = self.transpose_graph_middle2source.forward(middle_graph1, adj=adj5_transfer_m2s, relu=True)
        middle_2_target_graph1_v5 = self.transpose_graph_middle2target.forward(middle_graph1, adj=adj6_transfer_m2t, relu=True)
        # Similarity-based transfers between every pair of branches.
        source_2_target_graph1 = self.similarity_trans(source_graph1, target_graph1)
        source_2_middle_graph1 = self.similarity_trans(source_graph1, middle_graph1)
        target_2_source_graph1 = self.similarity_trans(target_graph1, source_graph1)
        target_2_middle_graph1 = self.similarity_trans(target_graph1, middle_graph1)
        middle_2_source_graph1 = self.similarity_trans(middle_graph1, source_graph1)
        middle_2_target_graph1 = self.similarity_trans(middle_graph1, target_graph1)
        # Fuse [own | 2x transferred-from-each-other-branch] node features.
        source_graph1 = torch.cat((source_graph1, target_2_source_graph1, target_2_source_graph1_v5, middle_2_source_graph1, middle_2_source_graph1_v5), dim=(- 1))
        source_graph1 = self.fc_graph_source.forward(source_graph1, relu=True)
        target_graph1 = torch.cat((target_graph1, source_2_target_graph1, source_2_target_graph1_v5, middle_2_target_graph1, middle_2_target_graph1_v5), dim=(- 1))
        target_graph1 = self.fc_graph_target.forward(target_graph1, relu=True)
        middle_graph1 = torch.cat((middle_graph1, source_2_middle_graph1, source_2_middle_graph1_v5, target_2_middle_graph1, target_2_middle_graph1_v5), dim=(- 1))
        middle_graph1 = self.fc_graph_middle.forward(middle_graph1, relu=True)
        # ===== hop 2 (same structure) =====
        source_graph2 = self.source_graph_conv1.forward(source_graph1, adj=adj2_source, relu=True)
        target_graph2 = self.target_graph_conv1.forward(target_graph1, adj=adj1_target, relu=True)
        middle_graph2 = self.target_graph_conv1.forward(middle_graph1, adj=adj4_middle, relu=True)
        source_2_target_graph2_v5 = self.transpose_graph_source2target.forward(source_graph2, adj=adj3_transfer_s2t, relu=True)
        source_2_middle_graph2_v5 = self.transpose_graph_source2middle.forward(source_graph2, adj=adj5_transfer_s2m, relu=True)
        target_2_source_graph2_v5 = self.transpose_graph_target2source.forward(target_graph2, adj=adj3_transfer_t2s, relu=True)
        target_2_middle_graph2_v5 = self.transpose_graph_target2middle.forward(target_graph2, adj=adj6_transfer_t2m, relu=True)
        middle_2_source_graph2_v5 = self.transpose_graph_middle2source.forward(middle_graph2, adj=adj5_transfer_m2s, relu=True)
        middle_2_target_graph2_v5 = self.transpose_graph_middle2target.forward(middle_graph2, adj=adj6_transfer_m2t, relu=True)
        source_2_target_graph2 = self.similarity_trans(source_graph2, target_graph2)
        source_2_middle_graph2 = self.similarity_trans(source_graph2, middle_graph2)
        target_2_source_graph2 = self.similarity_trans(target_graph2, source_graph2)
        target_2_middle_graph2 = self.similarity_trans(target_graph2, middle_graph2)
        middle_2_source_graph2 = self.similarity_trans(middle_graph2, source_graph2)
        middle_2_target_graph2 = self.similarity_trans(middle_graph2, target_graph2)
        source_graph2 = torch.cat((source_graph2, target_2_source_graph2, target_2_source_graph2_v5, middle_2_source_graph2, middle_2_source_graph2_v5), dim=(- 1))
        source_graph2 = self.fc_graph_source.forward(source_graph2, relu=True)
        target_graph2 = torch.cat((target_graph2, source_2_target_graph2, source_2_target_graph2_v5, middle_2_target_graph2, middle_2_target_graph2_v5), dim=(- 1))
        target_graph2 = self.fc_graph_target.forward(target_graph2, relu=True)
        middle_graph2 = torch.cat((middle_graph2, source_2_middle_graph2, source_2_middle_graph2_v5, target_2_middle_graph2, target_2_middle_graph2_v5), dim=(- 1))
        middle_graph2 = self.fc_graph_middle.forward(middle_graph2, relu=True)
        # ===== hop 3 (same structure) =====
        source_graph3 = self.source_graph_conv1.forward(source_graph2, adj=adj2_source, relu=True)
        target_graph3 = self.target_graph_conv1.forward(target_graph2, adj=adj1_target, relu=True)
        middle_graph3 = self.target_graph_conv1.forward(middle_graph2, adj=adj4_middle, relu=True)
        source_2_target_graph3_v5 = self.transpose_graph_source2target.forward(source_graph3, adj=adj3_transfer_s2t, relu=True)
        source_2_middle_graph3_v5 = self.transpose_graph_source2middle.forward(source_graph3, adj=adj5_transfer_s2m, relu=True)
        target_2_source_graph3_v5 = self.transpose_graph_target2source.forward(target_graph3, adj=adj3_transfer_t2s, relu=True)
        target_2_middle_graph3_v5 = self.transpose_graph_target2middle.forward(target_graph3, adj=adj6_transfer_t2m, relu=True)
        middle_2_source_graph3_v5 = self.transpose_graph_middle2source.forward(middle_graph3, adj=adj5_transfer_m2s, relu=True)
        middle_2_target_graph3_v5 = self.transpose_graph_middle2target.forward(middle_graph3, adj=adj6_transfer_m2t, relu=True)
        source_2_target_graph3 = self.similarity_trans(source_graph3, target_graph3)
        source_2_middle_graph3 = self.similarity_trans(source_graph3, middle_graph3)
        target_2_source_graph3 = self.similarity_trans(target_graph3, source_graph3)
        target_2_middle_graph3 = self.similarity_trans(target_graph3, middle_graph3)
        middle_2_source_graph3 = self.similarity_trans(middle_graph3, source_graph3)
        middle_2_target_graph3 = self.similarity_trans(middle_graph3, target_graph3)
        source_graph3 = torch.cat((source_graph3, target_2_source_graph3, target_2_source_graph3_v5, middle_2_source_graph3, middle_2_source_graph3_v5), dim=(- 1))
        source_graph3 = self.fc_graph_source.forward(source_graph3, relu=True)
        target_graph3 = torch.cat((target_graph3, source_2_target_graph3, source_2_target_graph3_v5, middle_2_target_graph3, middle_2_target_graph3_v5), dim=(- 1))
        target_graph3 = self.fc_graph_target.forward(target_graph3, relu=True)
        middle_graph3 = torch.cat((middle_graph3, source_2_middle_graph3, source_2_middle_graph3_v5, target_2_middle_graph3, target_2_middle_graph3_v5), dim=(- 1))
        middle_graph3 = self.fc_graph_middle.forward(middle_graph3, relu=True)
        return (source_graph3, target_graph3, middle_graph3, x)
def similarity_trans(self, source, target):
    """Map *source* node features onto *target*'s node layout.

    Builds a cosine-similarity attention matrix between L2-normalized
    target and source nodes, softmaxes it over the source axis, and uses
    it to take a weighted combination of the (un-normalized) source rows.
    """
    src_unit = F.normalize(source, p=2, dim=-1)
    tgt_unit = F.normalize(target, p=2, dim=-1)
    attn = torch.matmul(tgt_unit, src_unit.transpose(-1, -2))
    attn = F.softmax(attn, dim=-1)
    return torch.matmul(attn, source)
def bottom_forward_source(self, input, source_graph):
    """Fuse the source-domain graph back into the feature map and classify.

    Projects the graph nodes onto the spatial grid, adds a 1x1-conv skip
    of the input features, and applies the source classifier head.
    """
    fused = self.source_graph_2_fea.forward(source_graph, input)
    skip = self.source_skip_conv(input)
    return self.source_semantic(skip + fused)
def bottom_forward_target(self, input, target_graph):
    """Fuse the target-domain graph back into the feature map and classify.

    Note: the classifier head here is the inherited ``self.semantic``
    (not a target-specific conv), matching the original wiring.
    """
    fused = self.target_graph_2_fea.forward(target_graph, input)
    skip = self.target_skip_conv(input)
    return self.semantic(skip + fused)
def bottom_forward_middle(self, input, target_graph):
    """Fuse the middle-domain graph back into the feature map and classify.

    The parameter is named ``target_graph`` for signature compatibility,
    but it carries the middle-domain node features.
    """
    fused = self.middle_graph_2_fea.forward(target_graph, input)
    skip = self.middle_skip_conv(input)
    return self.middle_semantic(skip + fused)
def forward(self, input_source, input_target=None, input_middle=None, adj1_target=None, adj2_source=None, adj3_transfer_s2t=None, adj3_transfer_t2s=None, adj4_middle=None, adj5_transfer_s2m=None, adj6_transfer_t2m=None, adj5_transfer_m2s=None, adj6_transfer_m2t=None):
    """Dispatch on which single domain input is provided.

    Exactly one of (input_source, input_target, input_middle) should be
    non-None; the shared trunk runs once and only the matching bottom
    head is applied. Returns a 3-tuple (source_x, target_x, middle_x)
    with None for the domains that were not run. If no branch matches,
    falls through and implicitly returns None.
    """
    # --- target-only branch ---
    if ((input_source is None) and (input_target is not None) and (input_middle is None)):
        target_batch = input_target.size(0)  # NOTE(review): unused local
        input = input_target
        (source_graph, target_graph, middle_graph, x) = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, adj3_transfer_s2t=adj3_transfer_s2t, adj3_transfer_t2s=adj3_transfer_t2s, adj4_middle=adj4_middle, adj5_transfer_s2m=adj5_transfer_s2m, adj6_transfer_t2m=adj6_transfer_t2m, adj5_transfer_m2s=adj5_transfer_m2s, adj6_transfer_m2t=adj6_transfer_m2t)
        target_x = self.bottom_forward_target(x, target_graph)
        # F.upsample is deprecated in newer torch in favor of F.interpolate
        target_x = F.upsample(target_x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return (None, target_x, None)
    # --- source-only branch ---
    if ((input_source is not None) and (input_target is None) and (input_middle is None)):
        source_batch = input_source.size(0)
        source_list = range(source_batch)  # NOTE(review): unused local
        input = input_source
        (source_graph, target_graph, middle_graph, x) = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, adj3_transfer_s2t=adj3_transfer_s2t, adj3_transfer_t2s=adj3_transfer_t2s, adj4_middle=adj4_middle, adj5_transfer_s2m=adj5_transfer_s2m, adj6_transfer_t2m=adj6_transfer_t2m, adj5_transfer_m2s=adj5_transfer_m2s, adj6_transfer_m2t=adj6_transfer_m2t)
        source_x = self.bottom_forward_source(x, source_graph)
        source_x = F.upsample(source_x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return (source_x, None, None)
    # --- middle-only branch ---
    if ((input_middle is not None) and (input_source is None) and (input_target is None)):
        input = input_middle
        (source_graph, target_graph, middle_graph, x) = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, adj3_transfer_s2t=adj3_transfer_s2t, adj3_transfer_t2s=adj3_transfer_t2s, adj4_middle=adj4_middle, adj5_transfer_s2m=adj5_transfer_s2m, adj6_transfer_t2m=adj6_transfer_t2m, adj5_transfer_m2s=adj5_transfer_m2s, adj6_transfer_m2t=adj6_transfer_m2t)
        # NOTE(review): passes source_graph (not middle_graph) to the middle
        # head — looks like a copy-paste slip; confirm before changing.
        middle_x = self.bottom_forward_middle(x, source_graph)
        middle_x = F.upsample(middle_x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return (None, None, middle_x)
|
class deeplab_xception_end2end_3d_synbn(deeplab_xception_transfer_basemodel_savememory_synbn):
    """End-to-end three-domain (source/target/middle) graph-transfer DeepLab
    variant with synchronized BatchNorm.

    A shared DeepLab trunk produces features; each domain pools them into a
    class-node graph, three rounds of graph convolution + cross-domain
    transfer mix the graphs, and a per-domain bottom head projects the
    graph back onto the feature map for classification.
    """

    def __init__(self, nInputChannels=3, n_classes=20, os=16, input_channels=256, hidden_layers=128, out_channels=256, source_classes=7, middle_classes=18, transfer_graph=None):
        super(deeplab_xception_end2end_3d_synbn, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)
        # --- source branch (source_classes nodes) ---
        self.source_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=source_classes)
        self.source_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.source_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)  # NOTE(review): conv2/conv3 are created but top_forward only ever calls *_conv1
        self.source_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.source_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=source_classes)
        self.source_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
        self.source_semantic = nn.Conv2d(out_channels, source_classes, 1)
        self.middle_semantic = nn.Conv2d(out_channels, middle_classes, 1)
        # --- target branch (n_classes nodes) ---
        self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
        # --- middle branch (middle_classes nodes) ---
        self.middle_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=middle_classes)
        self.middle_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.middle_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.middle_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        # NOTE(review): nodes=n_classes here while every other middle module uses
        # middle_classes — looks like a copy-paste slip; confirm (changing it
        # would break existing checkpoints).
        self.middle_graph_2_fea = gcn.Graph_to_Featuremaps_savemem(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.middle_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])
        # --- cross-domain graph transfer layers ---
        self.transpose_graph_source2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=n_classes)
        self.transpose_graph_target2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=n_classes, end_nodes=source_classes)
        self.transpose_graph_middle2source = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=middle_classes, end_nodes=source_classes)
        # NOTE(review): end_nodes=source_classes for a middle->target transfer —
        # n_classes expected by symmetry with source2target; confirm.
        self.transpose_graph_middle2target = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=middle_classes, end_nodes=source_classes)
        self.transpose_graph_source2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=source_classes, end_nodes=middle_classes)
        self.transpose_graph_target2middle = gcn.Graph_trans(in_features=hidden_layers, out_features=hidden_layers, adj=transfer_graph, begin_nodes=n_classes, end_nodes=middle_classes)
        # fuse [self, 2 similarity-transferred, 2 adjacency-transferred] -> hidden
        self.fc_graph_source = gcn.GraphConvolution((hidden_layers * 5), hidden_layers)
        self.fc_graph_target = gcn.GraphConvolution((hidden_layers * 5), hidden_layers)
        self.fc_graph_middle = gcn.GraphConvolution((hidden_layers * 5), hidden_layers)

    def top_forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer_s2t=None, adj3_transfer_t2s=None, adj4_middle=None, adj5_transfer_s2m=None, adj6_transfer_t2m=None, adj5_transfer_m2s=None, adj6_transfer_m2t=None):
        """Shared trunk: DeepLab encoder/decoder, then three rounds of
        per-domain graph convolution + cross-domain mixing.

        Returns (source_graph3, target_graph3, middle_graph3, decoder_features).
        """
        # DeepLab encoder: backbone + ASPP + global pooling branch.
        (x, low_level_features) = self.xception_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        # Decoder: merge with projected low-level features.
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Pool decoder features into one graph per domain.
        source_graph = self.source_featuremap_2_graph(x)
        target_graph = self.target_featuremap_2_graph(x)
        middle_graph = self.middle_featuremap_2_graph(x)
        # ---- round 1 ----
        source_graph1 = self.source_graph_conv1.forward(source_graph, adj=adj2_source, relu=True)
        target_graph1 = self.target_graph_conv1.forward(target_graph, adj=adj1_target, relu=True)
        # NOTE(review): middle round uses target_graph_conv1, not
        # middle_graph_conv1 (here and in rounds 2/3) — confirm intent.
        middle_graph1 = self.target_graph_conv1.forward(middle_graph, adj=adj4_middle, relu=True)
        # Adjacency-based cross-domain transfers (*_v5 variants).
        source_2_target_graph1_v5 = self.transpose_graph_source2target.forward(source_graph1, adj=adj3_transfer_s2t, relu=True)
        source_2_middle_graph1_v5 = self.transpose_graph_source2middle.forward(source_graph1, adj=adj5_transfer_s2m, relu=True)
        target_2_source_graph1_v5 = self.transpose_graph_target2source.forward(target_graph1, adj=adj3_transfer_t2s, relu=True)
        target_2_middle_graph1_v5 = self.transpose_graph_target2middle.forward(target_graph1, adj=adj6_transfer_t2m, relu=True)
        middle_2_source_graph1_v5 = self.transpose_graph_middle2source.forward(middle_graph1, adj=adj5_transfer_m2s, relu=True)
        middle_2_target_graph1_v5 = self.transpose_graph_middle2target.forward(middle_graph1, adj=adj6_transfer_m2t, relu=True)
        # Similarity-attention cross-domain transfers.
        source_2_target_graph1 = self.similarity_trans(source_graph1, target_graph1)
        source_2_middle_graph1 = self.similarity_trans(source_graph1, middle_graph1)
        target_2_source_graph1 = self.similarity_trans(target_graph1, source_graph1)
        target_2_middle_graph1 = self.similarity_trans(target_graph1, middle_graph1)
        middle_2_source_graph1 = self.similarity_trans(middle_graph1, source_graph1)
        middle_2_target_graph1 = self.similarity_trans(middle_graph1, target_graph1)
        # Fuse self + 4 incoming transfers back to hidden width.
        source_graph1 = torch.cat((source_graph1, target_2_source_graph1, target_2_source_graph1_v5, middle_2_source_graph1, middle_2_source_graph1_v5), dim=(- 1))
        source_graph1 = self.fc_graph_source.forward(source_graph1, relu=True)
        target_graph1 = torch.cat((target_graph1, source_2_target_graph1, source_2_target_graph1_v5, middle_2_target_graph1, middle_2_target_graph1_v5), dim=(- 1))
        target_graph1 = self.fc_graph_target.forward(target_graph1, relu=True)
        middle_graph1 = torch.cat((middle_graph1, source_2_middle_graph1, source_2_middle_graph1_v5, target_2_middle_graph1, target_2_middle_graph1_v5), dim=(- 1))
        middle_graph1 = self.fc_graph_middle.forward(middle_graph1, relu=True)
        # ---- round 2 (same structure; reuses *_conv1 weights) ----
        source_graph2 = self.source_graph_conv1.forward(source_graph1, adj=adj2_source, relu=True)
        target_graph2 = self.target_graph_conv1.forward(target_graph1, adj=adj1_target, relu=True)
        middle_graph2 = self.target_graph_conv1.forward(middle_graph1, adj=adj4_middle, relu=True)
        source_2_target_graph2_v5 = self.transpose_graph_source2target.forward(source_graph2, adj=adj3_transfer_s2t, relu=True)
        source_2_middle_graph2_v5 = self.transpose_graph_source2middle.forward(source_graph2, adj=adj5_transfer_s2m, relu=True)
        target_2_source_graph2_v5 = self.transpose_graph_target2source.forward(target_graph2, adj=adj3_transfer_t2s, relu=True)
        target_2_middle_graph2_v5 = self.transpose_graph_target2middle.forward(target_graph2, adj=adj6_transfer_t2m, relu=True)
        middle_2_source_graph2_v5 = self.transpose_graph_middle2source.forward(middle_graph2, adj=adj5_transfer_m2s, relu=True)
        middle_2_target_graph2_v5 = self.transpose_graph_middle2target.forward(middle_graph2, adj=adj6_transfer_m2t, relu=True)
        source_2_target_graph2 = self.similarity_trans(source_graph2, target_graph2)
        source_2_middle_graph2 = self.similarity_trans(source_graph2, middle_graph2)
        target_2_source_graph2 = self.similarity_trans(target_graph2, source_graph2)
        target_2_middle_graph2 = self.similarity_trans(target_graph2, middle_graph2)
        middle_2_source_graph2 = self.similarity_trans(middle_graph2, source_graph2)
        middle_2_target_graph2 = self.similarity_trans(middle_graph2, target_graph2)
        source_graph2 = torch.cat((source_graph2, target_2_source_graph2, target_2_source_graph2_v5, middle_2_source_graph2, middle_2_source_graph2_v5), dim=(- 1))
        source_graph2 = self.fc_graph_source.forward(source_graph2, relu=True)
        target_graph2 = torch.cat((target_graph2, source_2_target_graph2, source_2_target_graph2_v5, middle_2_target_graph2, middle_2_target_graph2_v5), dim=(- 1))
        target_graph2 = self.fc_graph_target.forward(target_graph2, relu=True)
        middle_graph2 = torch.cat((middle_graph2, source_2_middle_graph2, source_2_middle_graph2_v5, target_2_middle_graph2, target_2_middle_graph2_v5), dim=(- 1))
        middle_graph2 = self.fc_graph_middle.forward(middle_graph2, relu=True)
        # ---- round 3 (same structure) ----
        source_graph3 = self.source_graph_conv1.forward(source_graph2, adj=adj2_source, relu=True)
        target_graph3 = self.target_graph_conv1.forward(target_graph2, adj=adj1_target, relu=True)
        middle_graph3 = self.target_graph_conv1.forward(middle_graph2, adj=adj4_middle, relu=True)
        source_2_target_graph3_v5 = self.transpose_graph_source2target.forward(source_graph3, adj=adj3_transfer_s2t, relu=True)
        source_2_middle_graph3_v5 = self.transpose_graph_source2middle.forward(source_graph3, adj=adj5_transfer_s2m, relu=True)
        target_2_source_graph3_v5 = self.transpose_graph_target2source.forward(target_graph3, adj=adj3_transfer_t2s, relu=True)
        target_2_middle_graph3_v5 = self.transpose_graph_target2middle.forward(target_graph3, adj=adj6_transfer_t2m, relu=True)
        middle_2_source_graph3_v5 = self.transpose_graph_middle2source.forward(middle_graph3, adj=adj5_transfer_m2s, relu=True)
        middle_2_target_graph3_v5 = self.transpose_graph_middle2target.forward(middle_graph3, adj=adj6_transfer_m2t, relu=True)
        source_2_target_graph3 = self.similarity_trans(source_graph3, target_graph3)
        source_2_middle_graph3 = self.similarity_trans(source_graph3, middle_graph3)
        target_2_source_graph3 = self.similarity_trans(target_graph3, source_graph3)
        target_2_middle_graph3 = self.similarity_trans(target_graph3, middle_graph3)
        middle_2_source_graph3 = self.similarity_trans(middle_graph3, source_graph3)
        middle_2_target_graph3 = self.similarity_trans(middle_graph3, target_graph3)
        source_graph3 = torch.cat((source_graph3, target_2_source_graph3, target_2_source_graph3_v5, middle_2_source_graph3, middle_2_source_graph3_v5), dim=(- 1))
        source_graph3 = self.fc_graph_source.forward(source_graph3, relu=True)
        target_graph3 = torch.cat((target_graph3, source_2_target_graph3, source_2_target_graph3_v5, middle_2_target_graph3, middle_2_target_graph3_v5), dim=(- 1))
        target_graph3 = self.fc_graph_target.forward(target_graph3, relu=True)
        middle_graph3 = torch.cat((middle_graph3, source_2_middle_graph3, source_2_middle_graph3_v5, target_2_middle_graph3, target_2_middle_graph3_v5), dim=(- 1))
        middle_graph3 = self.fc_graph_middle.forward(middle_graph3, relu=True)
        return (source_graph3, target_graph3, middle_graph3, x)

    def similarity_trans(self, source, target):
        """Attention-transfer source node features onto target's node layout
        via softmaxed cosine similarity."""
        sim = torch.matmul(F.normalize(target, p=2, dim=(- 1)), F.normalize(source, p=2, dim=(- 1)).transpose((- 1), (- 2)))
        sim = F.softmax(sim, dim=(- 1))
        return torch.matmul(sim, source)

    def bottom_forward_source(self, input, source_graph):
        """Graph-to-feature fusion + skip conv + source classifier head."""
        graph = self.source_graph_2_fea.forward(source_graph, input)
        x = self.source_skip_conv(input)
        x = (x + graph)
        x = self.source_semantic(x)
        return x

    def bottom_forward_target(self, input, target_graph):
        """Graph-to-feature fusion + skip conv + inherited target head
        (``self.semantic`` comes from the base model)."""
        graph = self.target_graph_2_fea.forward(target_graph, input)
        x = self.target_skip_conv(input)
        x = (x + graph)
        x = self.semantic(x)
        return x

    def bottom_forward_middle(self, input, target_graph):
        """Graph-to-feature fusion + skip conv + middle classifier head.
        (Parameter is named target_graph but carries middle-domain nodes.)"""
        graph = self.middle_graph_2_fea.forward(target_graph, input)
        x = self.middle_skip_conv(input)
        x = (x + graph)
        x = self.middle_semantic(x)
        return x

    def forward(self, input_source, input_target=None, input_middle=None, adj1_target=None, adj2_source=None, adj3_transfer_s2t=None, adj3_transfer_t2s=None, adj4_middle=None, adj5_transfer_s2m=None, adj6_transfer_t2m=None, adj5_transfer_m2s=None, adj6_transfer_m2t=None):
        """Run the trunk once for whichever single domain input is given and
        return (source_x, target_x, middle_x) with None for the others."""
        if ((input_source is None) and (input_target is not None) and (input_middle is None)):
            target_batch = input_target.size(0)  # NOTE(review): unused local
            input = input_target
            (source_graph, target_graph, middle_graph, x) = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, adj3_transfer_s2t=adj3_transfer_s2t, adj3_transfer_t2s=adj3_transfer_t2s, adj4_middle=adj4_middle, adj5_transfer_s2m=adj5_transfer_s2m, adj6_transfer_t2m=adj6_transfer_t2m, adj5_transfer_m2s=adj5_transfer_m2s, adj6_transfer_m2t=adj6_transfer_m2t)
            target_x = self.bottom_forward_target(x, target_graph)
            target_x = F.upsample(target_x, size=input.size()[2:], mode='bilinear', align_corners=True)
            return (None, target_x, None)
        if ((input_source is not None) and (input_target is None) and (input_middle is None)):
            source_batch = input_source.size(0)
            source_list = range(source_batch)  # NOTE(review): unused local
            input = input_source
            (source_graph, target_graph, middle_graph, x) = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, adj3_transfer_s2t=adj3_transfer_s2t, adj3_transfer_t2s=adj3_transfer_t2s, adj4_middle=adj4_middle, adj5_transfer_s2m=adj5_transfer_s2m, adj6_transfer_t2m=adj6_transfer_t2m, adj5_transfer_m2s=adj5_transfer_m2s, adj6_transfer_m2t=adj6_transfer_m2t)
            source_x = self.bottom_forward_source(x, source_graph)
            source_x = F.upsample(source_x, size=input.size()[2:], mode='bilinear', align_corners=True)
            return (source_x, None, None)
        if ((input_middle is not None) and (input_source is None) and (input_target is None)):
            input = input_middle
            (source_graph, target_graph, middle_graph, x) = self.top_forward(input, adj1_target=adj1_target, adj2_source=adj2_source, adj3_transfer_s2t=adj3_transfer_s2t, adj3_transfer_t2s=adj3_transfer_t2s, adj4_middle=adj4_middle, adj5_transfer_s2m=adj5_transfer_s2m, adj6_transfer_t2m=adj6_transfer_t2m, adj5_transfer_m2s=adj5_transfer_m2s, adj6_transfer_m2t=adj6_transfer_m2t)
            # NOTE(review): source_graph (not middle_graph) fed to the middle
            # head — looks like a copy-paste slip; confirm.
            middle_x = self.bottom_forward_middle(x, source_graph)
            middle_x = F.upsample(middle_x, size=input.size()[2:], mode='bilinear', align_corners=True)
            return (None, None, middle_x)
|
class GraphConvolution(nn.Module):
    """Basic graph-convolution layer.

    Computes ``support = input @ weight`` and, when an adjacency is given,
    ``output = adj @ support``. Optionally adds a bias and/or applies ReLU.

    Fix vs. original: the bias Parameter was allocated with
    ``torch.FloatTensor`` but never initialized (uninitialized memory), and
    ``relu=True`` was silently ignored whenever a bias was present. The bias
    is now zero-initialized and ReLU is applied after the bias.
    """

    def __init__(self, in_features, out_features, bias=False):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-init the weight; zero the bias (it was left uninitialized)."""
        torch.nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            torch.nn.init.zeros_(self.bias)

    def forward(self, input, adj=None, relu=False):
        """Apply the layer.

        :param input: (..., in_features) node features
        :param adj: optional adjacency to aggregate with (matmul from the left)
        :param relu: apply ReLU to the result
        """
        support = torch.matmul(input, self.weight)
        output = torch.matmul(adj, support) if adj is not None else support
        if self.bias is not None:
            output = output + self.bias
        return F.relu(output) if relu else output

    def __repr__(self):
        return (((((self.__class__.__name__ + ' (') + str(self.in_features)) + ' -> ') + str(self.out_features)) + ')')
|
class Featuremaps_to_Graph(nn.Module):
    """Pool a (n, c, h, w) feature map into per-node graph features.

    A learned projection ``pre_fea`` soft-assigns every pixel to a node
    (softmax over nodes); ``weight`` projects pixels into the hidden space;
    the assignment-weighted sum yields (n, nodes, hidden_layers) node
    features, passed through ReLU.
    """

    def __init__(self, input_channels, hidden_layers, nodes=7):
        super(Featuremaps_to_Graph, self).__init__()
        self.pre_fea = Parameter(torch.FloatTensor(input_channels, nodes))
        self.weight = Parameter(torch.FloatTensor(input_channels, hidden_layers))
        self.reset_parameters()

    def forward(self, input):
        n, c, h, w = input.size()
        # Flatten the spatial grid: (n, h*w, c).
        pixels = input.view(n, c, h * w).transpose(1, 2)
        # Soft assignment of each pixel to each node: (n, h*w, nodes).
        assign = F.softmax(torch.matmul(pixels, self.pre_fea), dim=-1)
        # Pixel features projected into the hidden space: (n, h*w, hidden).
        projected = torch.matmul(pixels, self.weight)
        # Assignment-weighted pooling -> (n, nodes, hidden).
        return F.relu(torch.matmul(assign.transpose(1, 2), projected))

    def reset_parameters(self):
        for p in self.parameters():
            torch.nn.init.xavier_uniform_(p)
|
class Featuremaps_to_Graph_transfer(nn.Module):
    """Featuremaps_to_Graph variant whose pixel-to-node projection is adapted
    from a source domain's ``pre_fea`` through a small MLP.

    Fix vs. original: ``__init__`` called ``self.reset_parameters()`` but the
    method was never defined (and ``nn.Module`` has none), so constructing
    the class raised AttributeError. It is defined here, Xavier-initializing
    only ``pre_fea`` and ``weight`` (the MLP's Linear layers keep their own
    default init; looping all parameters would crash Xavier on 1-D biases).
    """

    def __init__(self, input_channels, hidden_layers, nodes=7, source_nodes=20):
        super(Featuremaps_to_Graph_transfer, self).__init__()
        self.pre_fea = Parameter(torch.FloatTensor(input_channels, nodes))
        self.weight = Parameter(torch.FloatTensor(input_channels, hidden_layers))
        # MLP mapping a source-domain (c, source_nodes) projection to this
        # domain's (c, nodes) projection.
        self.pre_fea_transfer = nn.Sequential(*[nn.Linear(source_nodes, source_nodes), nn.LeakyReLU(True), nn.Linear(source_nodes, nodes), nn.LeakyReLU(True)])
        self.reset_parameters()

    def reset_parameters(self):
        """Init the two learned projections (was missing entirely)."""
        torch.nn.init.xavier_uniform_(self.pre_fea)
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, input, source_pre_fea):
        # NOTE(review): assigning to .data sidesteps autograd for pre_fea's
        # own contribution; kept as-is to preserve training behavior.
        self.pre_fea.data = self.pre_fea_learn(source_pre_fea)
        (n, c, h, w) = input.size()
        input1 = input.view(n, c, (h * w))
        input1 = input1.transpose(1, 2)
        fea_node = torch.matmul(input1, self.pre_fea)
        weight_node = torch.matmul(input1, self.weight)
        # NOTE(review): softmax over dim=1 (pixels) here, while the base
        # Featuremaps_to_Graph uses dim=-1 (nodes) — confirm which is intended.
        fea_node = F.softmax(fea_node, dim=1)
        graph_node = F.relu(torch.matmul(fea_node.transpose(1, 2), weight_node))
        return graph_node

    def pre_fea_learn(self, input):
        """Adapted projection = own pre_fea + MLP(source pre_fea)."""
        pre_fea = self.pre_fea_transfer.forward(input.unsqueeze(0)).squeeze(0)
        return (self.pre_fea.data + pre_fea)
|
class Graph_to_Featuremaps(nn.Module):
    """Project graph node features back onto a spatial feature map.

    :param input: (1, batch, nodes, hidden_layer) node features; a 3-D
        (batch, nodes, hidden_layer) tensor is also accepted and unsqueezed.
    :param res_feature: (batch, channels, h, w) feature map.
    :return: ReLU'd tensor shaped like ``res_feature`` (requires
        output_channels == channels).

    Fix vs. original: the rank-fallback used a bare ``except:``, which would
    also swallow KeyboardInterrupt and unrelated errors; it now catches only
    the ValueError raised by a 3-D unpack.
    """

    def __init__(self, input_channels, output_channels, hidden_layers, nodes=7):
        super(Graph_to_Featuremaps, self).__init__()
        # Scores a concatenated (pixel, node) feature pair -> scalar affinity.
        self.node_fea = Parameter(torch.FloatTensor((input_channels + hidden_layers), 1))
        self.weight = Parameter(torch.FloatTensor(hidden_layers, output_channels))
        self.reset_parameters()

    def reset_parameters(self):
        for ww in self.parameters():
            torch.nn.init.xavier_uniform_(ww)

    def forward(self, input, res_feature):
        (batchi, channeli, hi, wi) = res_feature.size()
        try:
            (_, batch, nodes, hidden) = input.size()
        except ValueError:  # fix: was a bare except; only a rank mismatch should trigger the fallback
            input = input.unsqueeze(0)
            (_, batch, nodes, hidden) = input.size()
        assert (batch == batchi)
        # Broadcast node features over every pixel: (batch, h*w, nodes, hidden).
        input1 = input.transpose(0, 1).expand(batch, (hi * wi), nodes, hidden)
        res_feature_after_view = res_feature.view(batch, channeli, (hi * wi)).transpose(1, 2)
        # Broadcast pixel features over every node: (batch, h*w, nodes, channels).
        res_feature_after_view1 = res_feature_after_view.unsqueeze(2).expand(batch, (hi * wi), nodes, channeli)
        new_fea = torch.cat((res_feature_after_view1, input1), dim=3)
        # Pixel-to-node affinity scores, softmaxed over nodes.
        new_node = torch.matmul(new_fea, self.node_fea)
        new_weight = torch.matmul(input, self.weight)
        new_node = new_node.view(batch, (hi * wi), nodes)
        new_node = F.softmax(new_node, dim=(- 1))
        # Affinity-weighted sum of projected node features per pixel.
        feature_out = torch.matmul(new_node, new_weight)
        feature_out = feature_out.transpose(2, 3).contiguous().view(res_feature.size())
        return F.relu(feature_out)
|
class Graph_to_Featuremaps_savemem(nn.Module):
    """Memory-saving variant of Graph_to_Featuremaps.

    Instead of concatenating (pixel, node) pairs and scoring with one vector,
    it scores pixels and nodes with two separate vectors and adds the
    results — mathematically the same affinity, without materializing the
    concatenated tensor.

    :param input: (1, batch, nodes, hidden_layer) node features; a 3-D
        (batch, nodes, hidden_layer) tensor is also accepted and unsqueezed.
    :param res_feature: (batch, channels, h, w) feature map.
    :return: ReLU'd tensor shaped like ``res_feature``.

    Fix vs. original: the rank-fallback used a bare ``except:``; it now
    catches only the ValueError raised by a 3-D unpack.
    """

    def __init__(self, input_channels, output_channels, hidden_layers, nodes=7):
        super(Graph_to_Featuremaps_savemem, self).__init__()
        self.node_fea_for_res = Parameter(torch.FloatTensor(input_channels, 1))
        self.node_fea_for_hidden = Parameter(torch.FloatTensor(hidden_layers, 1))
        self.weight = Parameter(torch.FloatTensor(hidden_layers, output_channels))
        self.reset_parameters()

    def reset_parameters(self):
        for ww in self.parameters():
            torch.nn.init.xavier_uniform_(ww)

    def forward(self, input, res_feature):
        (batchi, channeli, hi, wi) = res_feature.size()
        try:
            (_, batch, nodes, hidden) = input.size()
        except ValueError:  # fix: was a bare except; only a rank mismatch should trigger the fallback
            input = input.unsqueeze(0)
            (_, batch, nodes, hidden) = input.size()
        assert (batch == batchi)
        # Broadcast node features over pixels: (batch, h*w, nodes, hidden).
        input1 = input.transpose(0, 1).expand(batch, (hi * wi), nodes, hidden)
        res_feature_after_view = res_feature.view(batch, channeli, (hi * wi)).transpose(1, 2)
        res_feature_after_view1 = res_feature_after_view.unsqueeze(2).expand(batch, (hi * wi), nodes, channeli)
        # Separate pixel and node scores summed == score of the concatenation.
        new_node1 = torch.matmul(res_feature_after_view1, self.node_fea_for_res)
        new_node2 = torch.matmul(input1, self.node_fea_for_hidden)
        new_node = (new_node1 + new_node2)
        new_weight = torch.matmul(input, self.weight)
        new_node = new_node.view(batch, (hi * wi), nodes)
        new_node = F.softmax(new_node, dim=(- 1))
        feature_out = torch.matmul(new_node, new_weight)
        feature_out = feature_out.transpose(2, 3).contiguous().view(res_feature.size())
        return F.relu(feature_out)
|
class Graph_trans(nn.Module):
    """Transfer node features between two graphs of different sizes via a
    (end_nodes, begin_nodes) adjacency, row-normalized with ReLU + softmax.

    Fix vs. original: ``reset_parameters`` existed but was never called, so
    ``weight`` (and the learnable ``adj`` / ``bias`` when present) stayed as
    uninitialized ``torch.FloatTensor`` memory. ``__init__`` now calls it,
    Xavier-initializing the matrices and zeroing the bias.
    """

    def __init__(self, in_features, out_features, begin_nodes=7, end_nodes=2, bias=False, adj=None):
        super(Graph_trans, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if (adj is not None):
            (h, w) = adj.size()
            assert ((h == end_nodes) and (w == begin_nodes))
            # Fixed (non-learnable) transfer adjacency.
            self.adj = torch.autograd.Variable(adj, requires_grad=False)
        else:
            # No adjacency supplied: learn one.
            self.adj = Parameter(torch.FloatTensor(end_nodes, begin_nodes))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()  # fix: was never invoked -> weights stayed uninitialized

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.weight)
        if isinstance(self.adj, Parameter):
            torch.nn.init.xavier_uniform_(self.adj)
        if self.bias is not None:
            torch.nn.init.zeros_(self.bias)

    def forward(self, input, relu=False, adj_return=False, adj=None):
        """Transfer *input* (begin_nodes, in_features) to (end_nodes, out_features).

        :param adj: optional override of the stored adjacency
        :param adj_return: cache a cosine-similarity matrix of the output
            rows in ``self.adj_mat`` (read back via ``get_adj_mat``)
        """
        support = torch.matmul(input, self.weight)
        if (adj is None):
            adj = self.adj
        adj1 = self.norm_trans_adj(adj)
        output = torch.matmul(adj1, support)
        if adj_return:
            output1 = F.normalize(output, p=2, dim=(- 1))
            self.adj_mat = torch.matmul(output1, output1.transpose((- 2), (- 1)))
        if (self.bias is not None):
            # NOTE(review): with a bias, relu is skipped — preserved as-is.
            return (output + self.bias)
        elif relu:
            return F.relu(output)
        else:
            return output

    def get_adj_mat(self):
        """Return the cached output-similarity matrix, graph-normalized."""
        adj = graph.normalize_adj_torch(F.relu(self.adj_mat))
        return adj

    def get_encode_adj(self):
        return self.adj

    def norm_trans_adj(self, adj):
        """ReLU then row-softmax, so each output node's weights sum to 1."""
        adj = F.relu(adj)
        r = F.softmax(adj, dim=(- 1))
        return r
|
def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 A D^-1/2.

    Rows with zero degree get a zero inverse-sqrt degree (inf filtered out).
    Returns a scipy sparse COO matrix.
    """
    adj = sp.coo_matrix(adj)
    degrees = np.asarray(adj.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.0
    d_half = sp.diags(inv_sqrt)
    return adj.dot(d_half).transpose().dot(d_half).tocoo()
|
def preprocess_adj(adj):
    """Build, self-loop, and symmetrically normalize a graph adjacency.

    *adj* is a dict-of-lists graph description (networkx format); the result
    is a dense matrix of the normalized adjacency with self-loops added.
    """
    adjacency = nx.adjacency_matrix(nx.from_dict_of_lists(adj))
    with_self_loops = adjacency + sp.eye(adjacency.shape[0])
    return normalize_adj(with_self_loops).todense()
|
def row_norm(inputs):
    """Scale each array in *inputs* so its elements sum to 1."""
    return [x / x.sum() for x in inputs]
|
def normalize_adj_torch(adj):
    """Symmetrically normalize a torch adjacency: D^-1/2 A D^-1/2.

    Accepts either a (n, n) matrix or a (1, k, n, n) batch (first dim
    assumed singleton, matching the original's ``adj[0, i]`` indexing),
    normalizing each (n, n) slice independently.

    Fix vs. original: a zero-degree row gives ``0 ** -0.5 == inf``, which the
    original's ``isnan`` check did not filter (the scipy twin filters
    ``isinf``); infs are now zeroed as well.
    """
    if len(adj.size()) == 4:
        new_r = torch.zeros(adj.size()).type_as(adj)
        for i in range(adj.size(1)):
            new_r[0, i, ...] = _sym_norm_adj_item(adj[0, i])
        return new_r
    return _sym_norm_adj_item(adj)


def _sym_norm_adj_item(adj_item):
    """Normalize a single (n, n) adjacency slice."""
    rowsum = adj_item.sum(1)
    d_inv_sqrt = rowsum.pow(-0.5)
    d_inv_sqrt[torch.isnan(d_inv_sqrt)] = 0
    d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0  # fix: zero-degree rows produced inf
    d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
    return torch.matmul(torch.matmul(d_mat_inv_sqrt, adj_item), d_mat_inv_sqrt)
|
def _sum_ft(tensor):
'sum over the first and last dimention'
return tensor.sum(dim=0).sum(dim=(- 1))
|
def _unsqueeze_ft(tensor):
'add new dementions at the front and the tail'
return tensor.unsqueeze(0).unsqueeze((- 1))
|
class _SynchronizedBatchNorm(_BatchNorm):
    """BatchNorm whose statistics are synchronized across data-parallel
    replicas: each replica sends (sum, sum-of-squares, count) to the master
    copy, which reduces them, computes mean/inv-std, and broadcasts the
    result back. Falls back to plain F.batch_norm when not replicated or
    in eval mode.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True):
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        # Master-side coordinator; replicas register as slaves on replicate.
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input):
        # Single-device or eval: the stock batch_norm is exact and cheaper.
        if (not (self._is_parallel and self.training)):
            return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps)
        # Collapse all non-channel dims so stats are per-feature.
        input_shape = input.size()
        input = input.view(input.size(0), self.num_features, (- 1))
        # Elements contributing to each feature's statistics on this replica.
        sum_size = (input.size(0) * input.size(2))
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft((input ** 2))
        # Replica 0 is the master; others push through their slave pipe.
        if (self._parallel_id == 0):
            (mean, inv_std) = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            (mean, inv_std) = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        # Normalize (and affine-transform) with the synchronized statistics.
        if self.affine:
            output = (((input - _unsqueeze_ft(mean)) * _unsqueeze_ft((inv_std * self.weight))) + _unsqueeze_ft(self.bias))
        else:
            output = ((input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std))
        return output.view(input_shape)

    def __data_parallel_replicate__(self, ctx, copy_id):
        # Called once per replica by the data-parallel wrapper.
        self._is_parallel = True
        self._parallel_id = copy_id
        if (self._parallel_id == 0):
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""
        # Sort by device id so ReduceAddCoalesced/Broadcast see a stable order.
        intermediates = sorted(intermediates, key=(lambda i: i[1].sum.get_device()))
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        (sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        (mean, inv_std) = self._compute_mean_std(sum_, ssum, sum_size)
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        # Pair each replica's id with its (mean, inv_std) slice.
        outputs = []
        for (i, rec) in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[(i * 2):((i * 2) + 2)])))
        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device."""
        assert (size > 1), 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = (sum_ / size)
        sumvar = (ssum - (sum_ * mean))
        unbias_var = (sumvar / (size - 1))  # running stats use the unbiased estimate
        bias_var = (sumvar / size)          # normalization uses the biased estimate
        self.running_mean = (((1 - self.momentum) * self.running_mean) + (self.momentum * mean.data))
        self.running_var = (((1 - self.momentum) * self.running_var) + (self.momentum * unbias_var.data))
        # inv_std, clamped at eps for numerical stability.
        return (mean, (bias_var.clamp(self.eps) ** (- 0.5)))
|
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
"Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a\n mini-batch.\n\n .. math::\n\n y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta\n\n This module differs from the built-in PyTorch BatchNorm1d as the mean and\n standard-deviation are reduced across all devices during training.\n\n For example, when one uses `nn.DataParallel` to wrap the network during\n training, PyTorch's implementation normalize the tensor on each device using\n the statistics only on that device, which accelerated the computation and\n is also easy to implement, but the statistics might be inaccurate.\n Instead, in this synchronized version, the statistics will be computed\n over all training samples distributed on multiple devices.\n \n Note that, for one-GPU or CPU-only case, this module behaves exactly same\n as the built-in PyTorch implementation.\n\n The mean and standard-deviation are calculated per-dimension over\n the mini-batches and gamma and beta are learnable parameter vectors\n of size C (where C is the input size).\n\n During training, this layer keeps a running estimate of its computed mean\n and variance. The running sum is kept with a default momentum of 0.1.\n\n During evaluation, this running mean/variance is used for normalization.\n\n Because the BatchNorm is done over the `C` dimension, computing statistics\n on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm\n\n Args:\n num_features: num_features from an expected input of size\n `batch_size x num_features [x width]`\n eps: a value added to the denominator for numerical stability.\n Default: 1e-5\n momentum: the value used for the running_mean and running_var\n computation. Default: 0.1\n affine: a boolean value that when set to ``True``, gives the layer learnable\n affine parameters. 
Default: ``True``\n\n Shape:\n - Input: :math:`(N, C)` or :math:`(N, C, L)`\n - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)\n\n Examples:\n >>> # With Learnable Parameters\n >>> m = SynchronizedBatchNorm1d(100)\n >>> # Without Learnable Parameters\n >>> m = SynchronizedBatchNorm1d(100, affine=False)\n >>> input = torch.autograd.Variable(torch.randn(20, 100))\n >>> output = m(input)\n "
def _check_input_dim(self, input):
    """Accept only 2-D (N, C) or 3-D (N, C, L) inputs."""
    if input.dim() not in (2, 3):
        raise ValueError('expected 2D or 3D input (got {}D input)'.format(input.dim()))
    super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
|
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
"Applies Batch Normalization over a 4d input that is seen as a mini-batch\n of 3d inputs\n\n .. math::\n\n y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta\n\n This module differs from the built-in PyTorch BatchNorm2d as the mean and\n standard-deviation are reduced across all devices during training.\n\n For example, when one uses `nn.DataParallel` to wrap the network during\n training, PyTorch's implementation normalize the tensor on each device using\n the statistics only on that device, which accelerated the computation and\n is also easy to implement, but the statistics might be inaccurate.\n Instead, in this synchronized version, the statistics will be computed\n over all training samples distributed on multiple devices.\n \n Note that, for one-GPU or CPU-only case, this module behaves exactly same\n as the built-in PyTorch implementation.\n\n The mean and standard-deviation are calculated per-dimension over\n the mini-batches and gamma and beta are learnable parameter vectors\n of size C (where C is the input size).\n\n During training, this layer keeps a running estimate of its computed mean\n and variance. The running sum is kept with a default momentum of 0.1.\n\n During evaluation, this running mean/variance is used for normalization.\n\n Because the BatchNorm is done over the `C` dimension, computing statistics\n on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm\n\n Args:\n num_features: num_features from an expected input of\n size batch_size x num_features x height x width\n eps: a value added to the denominator for numerical stability.\n Default: 1e-5\n momentum: the value used for the running_mean and running_var\n computation. Default: 0.1\n affine: a boolean value that when set to ``True``, gives the layer learnable\n affine parameters. 
Default: ``True``\n\n Shape:\n - Input: :math:`(N, C, H, W)`\n - Output: :math:`(N, C, H, W)` (same shape as input)\n\n Examples:\n >>> # With Learnable Parameters\n >>> m = SynchronizedBatchNorm2d(100)\n >>> # Without Learnable Parameters\n >>> m = SynchronizedBatchNorm2d(100, affine=False)\n >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))\n >>> output = m(input)\n "
def _check_input_dim(self, input):
    # Spatial batch-norm operates on (N, C, H, W); reject any other rank
    # before delegating to the shared base-class validation.
    ndim = input.dim()
    if ndim != 4:
        raise ValueError('expected 4D input (got {}D input)'.format(ndim))
    super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
|
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch
    of 4d inputs

    .. math::

        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in PyTorch BatchNorm3d, the mean and standard-deviation
    are reduced across all devices during training, so statistics are computed
    over all training samples distributed on multiple devices rather than
    per-device (as happens with a plain `nn.DataParallel` wrapper). For a
    one-GPU or CPU-only run this module behaves exactly the same as the
    built-in PyTorch implementation.

    The mean and standard-deviation are calculated per-dimension over the
    mini-batches; gamma and beta are learnable parameter vectors of size C
    (the input channel count). During training a running estimate of the
    computed mean and variance is kept (default momentum 0.1); during
    evaluation this running mean/variance is used for normalization.

    Because the BatchNorm is done over the `C` dimension, computing statistics
    on `(N, D, H, W)` slices, it's common terminology to call this Volumetric
    BatchNorm or Spatio-temporal BatchNorm.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x depth x height x width
        eps: a value added to the denominator for numerical stability.
            Default: 1e-5
        momentum: the value used for the running_mean and running_var
            computation. Default: 0.1
        affine: a boolean value that when set to ``True``, gives the layer
            learnable affine parameters. Default: ``True``

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> # With Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100)
        >>> # Without Learnable Parameters
        >>> m = SynchronizedBatchNorm3d(100, affine=False)
        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10))
        >>> output = m(input)
    """

    def _check_input_dim(self, input):
        # Volumetric batch-norm requires (N, C, D, H, W); fail fast on any
        # other rank, then let the base class run its own checks.
        ndim = input.dim()
        if ndim != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(ndim))
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
|
class FutureResult(object):
    """A thread-safe future implementation. Used only as one-to-one pipe.

    Exactly one producer calls `put` and exactly one consumer calls `get`
    per round; a second `put` before the pending value is fetched is a
    programming error. Note that `None` cannot be used as a payload value,
    since `None` is the internal "empty" sentinel.
    """

    def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock)

    def put(self, result):
        """Publish *result* and wake the (single) waiting consumer."""
        with self._lock:
            # Fixed typo in the message ("has't" -> "hasn't").
            assert self._result is None, "Previous result hasn't been fetched."
            self._result = result
            self._cond.notify()

    def get(self):
        """Block until a result is available, then return and clear it."""
        with self._lock:
            # Use `while`, not `if`: Condition.wait() may wake spuriously,
            # so re-check the predicate after every wakeup.
            while self._result is None:
                self._cond.wait()
            res = self._result
            self._result = None
            return res
|
class SlavePipe(_SlavePipeBase):
    """Pipe for master-slave communication."""

    def run_slave(self, msg):
        # Send our message tagged with this pipe's identifier, block until
        # the master posts a reply on our future, then push True as an
        # acknowledgement so the master can confirm the round finished.
        self.queue.put((self.identifier, msg))
        reply = self.result.get()
        self.queue.put(True)
        return reply
|
class SyncMaster(object):
    """An abstract `SyncMaster` object.

    - During the replication, as the data parallel will trigger an callback of each module, all slave devices should
      call `register(id)` and obtain an `SlavePipe` to communicate with the master.
    - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected,
      and passed to a registered callback.
    - After receiving the messages, the master device should gather the information and determine to message passed
      back to each slave devices.
    """

    def __init__(self, master_callback):
        """
        Args:
            master_callback: a callback to be invoked after having collected messages from slave devices.
        """
        self._master_callback = master_callback
        # Shared request queue: slaves push (identifier, msg) tuples, and —
        # after receiving their reply — push True as an acknowledgement
        # (see SlavePipe.run_slave).
        self._queue = queue.Queue()
        # identifier -> _MasterRegistry holding that slave's reply future,
        # kept in registration order.
        self._registry = collections.OrderedDict()
        # Set True by run_master(); signals that the next register_slave()
        # call starts a fresh registration round.
        self._activated = False

    def __getstate__(self):
        # Only the callback survives pickling; queue/registry/flag are
        # rebuilt from scratch by __setstate__.
        return {'master_callback': self._master_callback}

    def __setstate__(self, state):
        self.__init__(state['master_callback'])

    def register_slave(self, identifier):
        """
        Register an slave device.

        Args:
            identifier: an identifier, usually is the device id.

        Returns: a `SlavePipe` object which can be used to communicate with the master device.
        """
        if self._activated:
            # A forward pass has completed; reset state before re-registering.
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)

    def run_master(self, master_msg):
        """
        Main entry for the master device in each forward pass.
        The messages were first collected from each devices (including the master device), and then
        an callback will be invoked to compute the message to be sent back to each devices
        (including the master device).

        Args:
            master_msg: the message that the master want to send to itself. This will be placed as the first
            message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example.

        Returns: the message to be sent back to the master device.
        """
        self._activated = True
        # The master's own message is entry 0; then block for one message
        # per registered slave (arrival order is arbitrary).
        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())
        results = self._master_callback(intermediates)
        assert (results[0][0] == 0), 'The first result should belongs to the master.'
        # Deliver each slave's reply through its registered future;
        # index 0 is the master's own result and is returned directly.
        for (i, res) in results:
            if (i == 0):
                continue
            self._registry[i].result.put(res)
        # Drain one True acknowledgement per slave so the queue is clean
        # before the next round (SlavePipe.run_slave puts True after get).
        for i in range(self.nr_slaves):
            assert (self._queue.get() is True)
        return results[0][1]

    @property
    def nr_slaves(self):
        # Number of currently registered slave devices.
        return len(self._registry)
|
class CallbackContext(object):
    """Empty attribute container.

    One instance is shared by the corresponding sub-module across all
    replicated copies (see `execute_replication_callbacks`), letting the
    copies exchange state by setting attributes on it.
    """
    pass
|
def execute_replication_callbacks(modules):
    """
    Execute an replication callback `__data_parallel_replicate__` on each module created by original replication.

    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`

    Note that, as all modules are isomorphism, we assign each sub-module with a context
    (shared among multiple copies of this module on different devices).
    Through this context, different copies can share some information.

    We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
    of any slave copies.
    """
    # All replicas are isomorphic, so one context per sub-module of the
    # master (first) copy covers sub-module j of every copy.
    num_submodules = sum(1 for _ in modules[0].modules())
    contexts = [CallbackContext() for _ in range(num_submodules)]
    # Iterate copies in order: copy_id 0 (the master) runs its callbacks
    # before any slave copy does.
    for copy_id, replica in enumerate(modules):
        for idx, submodule in enumerate(replica.modules()):
            if hasattr(submodule, '__data_parallel_replicate__'):
                submodule.__data_parallel_replicate__(contexts[idx], copy_id)
|
class DataParallelWithCallback(DataParallel):
    """
    Data Parallel with a replication callback.

    An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by
    original `replicate` function.
    The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """

    def replicate(self, module, device_ids):
        # Let the stock DataParallel build the per-device replicas, then
        # fire each replica's replication callbacks before returning them.
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.