repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
Relation-CZSL | Relation-CZSL-master/model/pygcn.py | import math
import numpy as np
from scipy.sparse import diags
import torch
from torch.nn import Module, Parameter
def normalize(mx):
    """Row-normalize a matrix so that every non-empty row sums to 1.

    Args:
        mx: 2-D matrix (dense ndarray or scipy sparse matrix) to normalize.

    Returns:
        The row-normalized matrix; rows that sum to zero are left all-zero.
    """
    rowsum = np.array(mx.sum(1))
    # Silence the divide-by-zero warning: the infs produced for empty rows
    # are explicitly zeroed on the next line.
    with np.errstate(divide='ignore'):
        r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    Re-implemented using Conv1d to support batch operation.
    """
    def __init__(self, in_features, out_features, bias=True, groups=1, adj=None, **kwargs):
        """
        Args:
            in_features: size of each input node feature vector.
            out_features: size of each output node feature vector.
            bias: if True, adds a learnable bias to the output.
            groups: kept for __repr__/compatibility; not used in the computation.
            adj: optional default adjacency matrix, used by forward() when no
                explicit adjacency is given.
        """
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.adj = adj
        self.groups = groups
        # TODO: to support different weights for different nodes (maybe unfold -> matmul -> fold trick)
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(1, out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weight/bias from U(-stdv, stdv) with stdv = 1/sqrt(out_features)."""
        stdv = 1. / math.sqrt(self.out_features)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, x, adj=None, should_normalize=True):
        """Compute adj @ (x @ W) (+ bias).

        Args:
            x: node features of shape [num_nodes, in_features].
            adj: adjacency matrix; when omitted, falls back to the adjacency
                given at construction time (fix: previously the constructor's
                `adj` was stored but never used).
            should_normalize: if True, row-normalize adj before aggregation.
        """
        if adj is None:
            adj = self.adj
        support = torch.mm(x, self.weight)
        if should_normalize:
            adj = torch.Tensor(normalize(adj)).to(self.weight.device)
        output = torch.matmul(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ', ' + f'Groups={self.groups}' + ')'
# Quick smoke test: push a random 32-node graph through the layer and print
# the output shape (expected: torch.Size([32, 32])).
if __name__ == "__main__":
    gc = GraphConvolution(in_features=512, out_features=32, bias=True)
    x = torch.randn((32, 512))  # 32 nodes, 512-dim features
    adj = torch.randn((32, 32))  # dense random adjacency
print(gc(x, adj).shape) | 2,132 | 31.318182 | 103 | py |
Relation-CZSL | Relation-CZSL-master/model/datasets/CompositionDataset.py | from PIL import Image
import random
import numpy as np
import torch
import torch.utils.data as tdata
import torchvision.transforms as transforms
class ImageLoader:
    """Callable that loads an image, relative to a root directory, as RGB."""

    def __init__(self, root):
        # Base directory that image names passed to __call__ are relative to.
        self.img_dir = root

    def __call__(self, img):
        path = '%s/%s' % (self.img_dir, img)
        return Image.open(path).convert('RGB')
def imagenet_transform(phase):
    """Return the torchvision transform pipeline for a dataset phase.

    Args:
        phase: 'train' (random resized crop + horizontal flip augmentation)
            or 'val'/'test' (deterministic resize + center crop).

    Raises:
        ValueError: if phase is not one of 'train', 'val', 'test'.
            (Previously an unknown phase fell through and raised a confusing
            UnboundLocalError on the return statement.)
    """
    # ImageNet channel statistics used for normalization.
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    if phase == 'train':
        transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    elif phase in ['val', 'test']:
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    else:
        raise ValueError('phase must be "train", "val" or "test", got %r' % (phase,))
    return transform
class CompositionDataset(tdata.Dataset):
    """Dataset of (image, attribute, object) triples for compositional
    zero-shot learning (MIT-States / UT-Zap50k style splits).

    Each item yields the image (or a precomputed feature vector), the
    attribute/object embeddings, their integer indices and the raw strings;
    in train mode, `kneg` same-attribute negatives plus one same-object
    negative are appended for triplet-style losses.

    Note: `att_emb_dict` / `obj_emb_dict` start as None and must be filled
    in externally before items are fetched.
    """
    def __init__(self, root, phase, embedding_dict=None, split='compositional-split', getitem_behavior=None, precompute_feat=True, **kwargs):
        # root: dataset root directory; phase: 'train' | 'val' | 'test'.
        self.root = root
        self.phase = phase
        # Controls whether __getitem__ appends negative samples; defaults to
        # the phase itself.
        self.getitem_behavior = phase if getitem_behavior is None else getitem_behavior
        self.split = split
        # When True, per-sample features are read from a precomputed .npy
        # file instead of loading and transforming raw images.
        self.precompute_feat = precompute_feat
        self.feat_dim = None
        self.transform = imagenet_transform(phase)
        self.loader = ImageLoader(self.root + '/images/')
        self.attrs, self.objs, self.pairs, \
            self.train_pairs, self.val_pairs, \
            self.test_pairs = self.parse_split()
        self.train_data, self.val_data, self.test_data = self.get_split_info()
        if self.phase == 'train':
            self.data = self.train_data
        elif self.phase == 'val':
            self.data = self.val_data
        else:
            self.data = self.test_data
        # String -> integer-index lookup tables.
        self.attr2idx = {attr: idx for idx, attr in enumerate(self.attrs)}
        self.obj2idx = {obj: idx for idx, obj in enumerate(self.objs)}
        self.pair2idx = {pair: idx for idx, pair in enumerate(self.pairs)}
        # attr/obj -> indices of train pairs containing that primitive.
        self.attr2pairid = {}
        self.obj2pairid = {}
        for i, pair in enumerate(self.train_pairs):
            attr, obj = pair[0], pair[1]
            if attr not in self.attr2pairid.keys():
                self.attr2pairid[attr] = [i]
            else:
                self.attr2pairid[attr].append(i)
            if obj not in self.obj2pairid.keys():
                self.obj2pairid[obj] = [i]
            else:
                self.obj2pairid[obj].append(i)
        # attr/obj -> indices into self.data of samples with that primitive.
        self.attr2dataid = {}
        self.obj2dataid = {}
        for i, pair in enumerate(self.data):
            _, attr, obj = pair
            if attr not in self.attr2dataid.keys():
                self.attr2dataid[attr] = [i]
            else:
                self.attr2dataid[attr].append(i)
            if obj not in self.obj2dataid.keys():
                self.obj2dataid[obj] = [i]
            else:
                self.obj2dataid[obj].append(i)
        self.embedding_dict = embedding_dict
        # Filled in externally before use (see sample_negative/__getitem__).
        self.att_emb_dict = None
        self.obj_emb_dict = None
        # Number of same-attribute negatives appended per item in train mode.
        self.kneg = kwargs['kneg'] if 'kneg' in kwargs.keys() else None
        print('# train pairs: %d | # val pairs: %d | # test pairs: %d' % (len(
            self.train_pairs), len(self.val_pairs), len(self.test_pairs)))
        print('# train images: %d | # val images: %d | # test images: %d' %
              (len(self.train_data), len(self.val_data), len(self.test_data)))
        # fix later -- affordance thing
        # return {object: all attrs that occur with obj}
        self.obj_affordance = {}
        self.train_obj_affordance = {}
        for _obj in self.objs:
            candidates = [attr for (_, attr, obj) in self.train_data + self.test_data if obj == _obj]
            self.obj_affordance[_obj] = sorted(list(set(candidates)))
            candidates = [attr for (_, attr, obj) in self.train_data if obj == _obj]
            self.train_obj_affordance[_obj] = sorted(list(set(candidates)))
        if self.precompute_feat:
            # Precomputed per-sample features, aligned with self.data ordering.
            self.feats = np.load(file=f'{self.root}/feat_{self.phase}.npy')

    def get_split_info(self):
        """Read metadata and bucket samples into train/val/test lists of
        [image, attr, obj]; instances with unlabeled attributes or pairs
        outside the current split are dropped."""
        if self.split == 'compositional-split':
            data = torch.load(self.root + '/metadata.t7')
        else:
            data = torch.load(self.root + '/metadata-natural.t7')
        train_data, val_data, test_data = [], [], []
        for instance in data:
            image, attr, obj, settype = instance['image'], instance[
                'attr'], instance['obj'], instance['set']
            if attr == 'NA' or (attr,
                                obj) not in self.pairs or settype == 'NA':
                # ignore instances with unlabeled attributes
                # ignore instances that are not in current split
                continue
            data_i = [image, attr, obj]
            if settype == 'train':
                train_data.append(data_i)
            elif settype == 'val':
                val_data.append(data_i)
            else:
                test_data.append(data_i)
        return train_data, val_data, test_data

    def parse_split(self):
        """Parse the split's pair lists.

        Returns:
            (all_attrs, all_objs, all_pairs, train_pairs, val_pairs,
            test_pairs), with the unions sorted for deterministic indexing.
        """
        def parse_pairs(pair_list):
            with open(pair_list, 'r') as f:
                pairs = f.read().strip().split('\n')
                pairs = [t.split() for t in pairs]
                pairs = list(map(tuple, pairs))
            attrs, objs = zip(*pairs)
            return attrs, objs, pairs
        tr_attrs, tr_objs, tr_pairs = parse_pairs(
            '%s/%s/train_pairs.txt' % (self.root, self.split))
        vl_attrs, vl_objs, vl_pairs = parse_pairs(
            '%s/%s/val_pairs.txt' % (self.root, self.split))
        ts_attrs, ts_objs, ts_pairs = parse_pairs(
            '%s/%s/test_pairs.txt' % (self.root, self.split))
        all_attrs, all_objs = sorted(
            list(set(tr_attrs + vl_attrs + ts_attrs))), sorted(
                list(set(tr_objs + vl_objs + ts_objs)))
        all_pairs = sorted(list(set(tr_pairs + vl_pairs + ts_pairs)))
        return all_attrs, all_objs, all_pairs, tr_pairs, vl_pairs, ts_pairs

    def sample_negative(self, attr, obj, free_sample=False, same_attr=None):
        """Sample a training pair different from (attr, obj) plus an image of it.

        Args:
            attr, obj: the positive pair to avoid.
            free_sample: if True, sample any other train pair uniformly.
            same_attr: if True keep the attribute (change the object); if
                False keep the object; if None, flip a fair coin.

        Returns:
            (img, attr_embedding, obj_embedding, new_attr, new_obj)
        """
        # make sure at least one primitive concept is the same
        if free_sample:
            while True:
                new_attr, new_obj = self.train_pairs[np.random.choice(len(self.train_pairs))]
                if not (new_attr == attr and new_obj == obj):
                    break
        else:
            if same_attr is None:
                same_attr = (random.random() >= 0.5)
            if same_attr:
                new_attr = attr
                candidate_id = sorted(list(set(self.attr2pairid[new_attr]).difference(set(self.obj2pairid[obj]))))
            else:
                new_obj = obj
                candidate_id = sorted(list(set(self.obj2pairid[new_obj]).difference(set(self.attr2pairid[attr]))))
            if len(candidate_id) > 0:
                new_id = random.sample(candidate_id, 1)[0]
                new_attr, new_obj = self.train_pairs[new_id]
            else:
                # however, if that fails, fall back to free sample
                while True:
                    new_attr, new_obj = self.train_pairs[np.random.choice(len(self.train_pairs))]
                    if not (new_attr == attr and new_obj == obj):
                        break
        # select an image with category (new_attr, new_obj)
        data_candidate_id = sorted(list(set(self.attr2dataid[new_attr]).intersection(self.obj2dataid[new_obj])))
        data_id = np.random.choice(data_candidate_id, 1)[0]
        if self.precompute_feat:
            img = torch.FloatTensor(self.feats[data_id]).float()
        else:
            img_id = self.data[data_id][0]
            img = self.loader(img_id)
            img = self.transform(img)
        # return self.sample_negative(attr, obj)
        # if new_attr != attr and new_obj != obj:
        #     return self.sample_negative(attr, obj)
        # return (self.attr2idx[new_attr], self.obj2idx[new_obj])
        return img, self.att_emb_dict[new_attr], self.obj_emb_dict[new_obj], new_attr, new_obj

    def sample_negative_standalone(self, attr_idx, obj_idx):
        """Index-based negative sampling; returns indices and embeddings.

        Bug fix: sample_negative returns 5 values (img, attr_emb, obj_emb,
        attr, obj); the original 4-target unpacking raised ValueError on
        every call.
        """
        attr = self.attrs[attr_idx]
        obj = self.objs[obj_idx]
        _, _, _, neg_attr, neg_obj = self.sample_negative(attr, obj)
        return self.attr2idx[neg_attr], self.obj2idx[neg_obj], self.att_emb_dict[neg_attr], self.obj_emb_dict[neg_obj]

    def sample_positive(self, attr, obj, index=None):
        """
        Args:
            index: if set, those indices will be excluded from final sample candidates.
        Returns:
            img, att_emb, obj_emb, att, obj
        """
        data_candidate_id = sorted(list(set(self.attr2dataid[attr]).intersection(self.obj2dataid[obj]).difference(set([index]))))
        if len(data_candidate_id) > 0:
            data_id = np.random.choice(data_candidate_id, 1)[0]
        else:
            # The anchor is the only sample of this pair; reuse it.
            data_id = index
        if self.precompute_feat:
            img = torch.FloatTensor(self.feats[data_id]).float()
        else:
            img_id = self.data[data_id][0]
            img = self.loader(img_id)
            img = self.transform(img)
        return img, self.att_emb_dict[attr], self.obj_emb_dict[obj], attr, obj

    def sample_negative_by_pair_id(self, pair_id):
        """Return [attr_idx, obj_idx] for every train pair except pair_id."""
        data = []
        for i in range(len(self.train_pairs)):
            if i == pair_id:
                continue
            att, obj = self.train_pairs[i]
            att_id, obj_id = self.attr2idx[att], self.obj2idx[obj]
            # data_candidate_id = list(set(self.attr2dataid[att]).intersection(self.obj2dataid[obj]))
            # data_id = np.random.choice(data_candidate_id, 1)[0]
            # img_id = self.data[data_id][0]
            # img = self.loader(img_id)
            # img = self.transform(img)
            data.append([att_id, obj_id])
        return data

    def __getitem__(self, index):
        """Return [img, attr_emb, obj_emb, attr_idx, obj_idx, attr, obj];
        in train mode, kneg same-attribute negatives plus one same-object
        negative (7 extra fields each) are appended."""
        image, attr, obj = self.data[index]
        if self.precompute_feat:
            img = torch.FloatTensor(self.feats[index]).float()  # !!!
            # if self.phase == 'train':
            #     img += torch.randn(img.shape)  # Normal(0, 1)
        else:
            img = self.loader(image)
            img = self.transform(img)
        # data = [img, self.attr2idx[attr], self.obj2idx[obj], self.pair2idx[(attr, obj)]]
        data = [img, self.att_emb_dict[attr], self.obj_emb_dict[obj],
                self.attr2idx[attr], self.obj2idx[obj], attr, obj]
        if self.getitem_behavior == 'train' and self.kneg is not None:
            for k in range(self.kneg):
                img_n, neg_attr_emb, neg_obj_emb, neg_attr, neg_obj = self.sample_negative(attr, obj, same_attr=True)  # negative example for triplet loss
                data += [img_n, neg_attr_emb, neg_obj_emb, self.attr2idx[neg_attr], self.obj2idx[neg_obj], neg_attr, neg_obj]
            # for k in range(self.kneg):
            #     img_p, pos_attr_emb, pos_obj_emb, pos_attr, pos_obj = self.sample_positive(attr, obj, index)  # positive example for triplet loss
            #     data += [img_p, pos_attr_emb, pos_obj_emb, self.attr2idx[pos_attr], self.obj2idx[pos_obj], pos_attr, pos_obj]
            img_n_1, neg_attr_emb_1, neg_obj_emb_1, neg_attr_1, neg_obj_1 = self.sample_negative(attr, obj, same_attr=False)  # negative example for triplet loss
            data += [img_n_1, neg_attr_emb_1, neg_obj_emb_1, self.attr2idx[neg_attr_1], self.obj2idx[neg_obj_1], neg_attr_1, neg_obj_1]
            # img_p, attr_emb_p, obj_emb_p, attr_p, obj_p = self.sample_positive(attr, obj, index)  # negative example for triplet loss
            # data += [img_p, attr_emb_p, obj_emb_p, self.attr2idx[attr_p], self.obj2idx[obj_p]]
            # data += [np.array(self.sample_negative_by_pair_id(self.train_pairs.index((attr, obj))))]
        return data

    def __len__(self):
        return len(self.data)
# for debug only
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from model.datasets.glove import load_glove_as_dict
    # Smoke test: build the train split and fetch a single batch.
    train_dataset = CompositionDataset('../../data/mitstates', 'train',
                                       embedding_dict=load_glove_as_dict('../../data/glove'))
    train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=0)
    data = next(iter(train_dataloader))
    pass
| 12,963 | 41.785479 | 165 | py |
Relation-CZSL | Relation-CZSL-master/model/datasets/CompositionDatasetGrouped.py | from PIL import Image
import numpy as np
import torch
import torch.utils.data as tdata
import torchvision.transforms as transforms
class ImageLoader:
    """Load an image by relative path from a fixed root directory, as RGB."""

    def __init__(self, root):
        self.img_dir = root  # base directory for relative image names

    def __call__(self, img):
        full_path = '%s/%s' % (self.img_dir, img)
        image = Image.open(full_path).convert('RGB')
        return image
def imagenet_transform(phase):
    """Return the ImageNet-style evaluation transform for *phase*.

    Note: 'train' and 'test' currently share the same deterministic
    resize/center-crop pipeline (no augmentation).

    Raises:
        ValueError: for any phase other than 'train' or 'test'.
    """
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    if phase not in ('train', 'test'):
        raise ValueError('Phase can only be \"train\" or \"test\".')
    # Both phases build the identical pipeline, so construct it once.
    return transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
class CompositionDataset(tdata.Dataset):
    """(image, attribute, object) dataset whose train and test pair sets are
    disjoint (compositional zero-shot split without a val partition).

    Note: `att_emb_dict` / `obj_emb_dict` start as None and must be filled
    in externally before items are fetched.
    """
    def __init__(self, root, phase, embedding_dict=None, split='compositional-split'):
        # root: dataset root directory; phase: 'train' | 'test'.
        self.root = root
        self.phase = phase
        self.split = split
        self.feat_dim = None
        self.transform = imagenet_transform(phase)
        self.loader = ImageLoader(self.root + '/images/')
        self.attrs, self.objs, self.pairs, self.train_pairs, self.test_pairs = self.parse_split()
        assert len(set(self.train_pairs) & set(
            self.test_pairs)) == 0, 'train and test are not mutually exclusive'
        self.train_data, self.test_data = self.get_split_info()
        self.data = self.train_data if self.phase == 'train' else self.test_data
        # String -> integer-index lookup tables.
        self.attr2idx = {attr: idx for idx, attr in enumerate(self.attrs)}
        self.obj2idx = {obj: idx for idx, obj in enumerate(self.objs)}
        self.pair2idx = {pair: idx for idx, pair in enumerate(self.pairs)}
        self.embedding_dict = embedding_dict
        # Filled in externally before items are fetched.
        self.att_emb_dict = None
        self.obj_emb_dict = None
        print(
            '# train pairs: %d | # test pairs: %d' % (len(self.train_pairs), len(self.test_pairs)))
        # fix later -- affordance thing
        # return {object: all attrs that occur with obj}
        self.obj_affordance = {}
        self.train_obj_affordance = {}
        for _obj in self.objs:
            candidates = [attr for (_, attr, obj) in self.train_data + self.test_data if
                          obj == _obj]
            self.obj_affordance[_obj] = list(set(candidates))
            candidates = [attr for (_, attr, obj) in self.train_data if obj == _obj]
            self.train_obj_affordance[_obj] = list(set(candidates))

    def get_split_info(self):
        """Split metadata instances into train/test by pair membership;
        drops unlabeled or out-of-split instances."""
        data = torch.load(self.root + '/metadata.t7')
        train_pair_set = set(self.train_pairs)
        train_data, test_data = [], []
        for instance in data:
            image, attr, obj = instance['image'], instance['attr'], instance['obj']
            if attr == 'NA' or (attr, obj) not in self.pairs:
                # ignore instances with unlabeled attributes
                # ignore instances that are not in current split
                continue
            data_i = [image, attr, obj]
            if (attr, obj) in train_pair_set:
                train_data.append(data_i)
            else:
                test_data.append(data_i)
        return train_data, test_data

    def parse_split(self):
        """Read the train/test pair lists; returns the sorted unions of
        attrs/objs/pairs plus the raw per-split pair lists."""
        def parse_pairs(pair_list):
            with open(pair_list, 'r') as f:
                pairs = f.read().strip().split('\n')
                pairs = [t.split() for t in pairs]
                pairs = list(map(tuple, pairs))
            attrs, objs = zip(*pairs)
            return attrs, objs, pairs
        tr_attrs, tr_objs, tr_pairs = parse_pairs('%s/%s/train_pairs.txt' % (self.root, self.split))
        ts_attrs, ts_objs, ts_pairs = parse_pairs('%s/%s/test_pairs.txt' % (self.root, self.split))
        all_attrs, all_objs = sorted(list(set(tr_attrs + ts_attrs))), sorted(
            list(set(tr_objs + ts_objs)))
        all_pairs = sorted(list(set(tr_pairs + ts_pairs)))
        return all_attrs, all_objs, all_pairs, tr_pairs, ts_pairs

    def sample_negative(self, attr, obj):
        """Uniformly sample a train pair different from (attr, obj);
        retries recursively on a collision with the anchor pair."""
        new_attr, new_obj = self.train_pairs[np.random.choice(len(self.train_pairs))]
        if new_attr == attr and new_obj == obj:
            return self.sample_negative(attr, obj)
        # return (self.attr2idx[new_attr], self.obj2idx[new_obj])
        return self.att_emb_dict[new_attr], self.obj_emb_dict[new_obj], new_attr, new_obj

    def sample_negative_standalone(self, attr_idx, obj_idx):
        """Index-based wrapper: sample a negative pair and return its
        indices plus embeddings."""
        attr = self.attrs[attr_idx]
        obj = self.objs[obj_idx]
        _, _, neg_attr, neg_obj = self.sample_negative(attr, obj)
        return self.attr2idx[neg_attr], self.obj2idx[neg_obj], self.att_emb_dict[neg_attr], self.obj_emb_dict[neg_obj]

    def __getitem__(self, index):
        """Return [img, attr_emb, obj_emb, attr_idx, obj_idx, attr, obj];
        in train phase, a negative pair's embeddings and indices follow."""
        image, attr, obj = self.data[index]
        img = self.loader(image)
        img = self.transform(img)
        # data = [img, self.attr2idx[attr], self.obj2idx[obj], self.pair2idx[(attr, obj)]]
        data = [img, self.att_emb_dict[attr], self.obj_emb_dict[obj],
                self.attr2idx[attr], self.obj2idx[obj], attr, obj]
        if self.phase == 'train':
            neg_attr_emb, neg_obj_emb, neg_attr, neg_obj = self.sample_negative(attr, obj)  # negative example for triplet loss
            data += [neg_attr_emb, neg_obj_emb, self.attr2idx[neg_attr], self.obj2idx[neg_obj]]
        return data

    def __len__(self):
        return len(self.data)
# for debug only
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from model.datasets.glove import load_glove_as_dict
    # Smoke test: build the train split and pull one batch.
    train_dataset = CompositionDataset('../../data/mitstates', 'train',
                                       embedding_dict=load_glove_as_dict('../../data/glove'))
    train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=0)
    data = next(iter(train_dataloader))
    pass
| 6,104 | 36.22561 | 127 | py |
Relation-CZSL | Relation-CZSL-master/model/misc/utils.py | import sys
import os
import time
import subprocess
import inspect
import logging
import argparse
from contextlib import contextmanager
from timeit import default_timer
import matplotlib.pyplot as plt
import torch
import random
import uuid
import numpy as np
import xmltodict
# ---------- debugging ---------- #
def plot_grad_flow(named_parameters):
    '''Plots the gradients flowing through different layers in the net during training.
    Can be used for checking for possible gradient vanishing / exploding problems.
    Usage: Plug this function in Trainer class after loss.backwards() as
    "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow.
    Prints per-layer average/max |grad| and saves a bar chart to grad.local.png.'''
    ave_grads = []
    max_grads = []
    layers = []
    for n, p in named_parameters:
        # Only trainable weight tensors; bias parameters are skipped.
        if(p.requires_grad) and ("bias" not in n):
            layers.append(n)
            if p.grad is not None:
                ave_grads.append(p.grad.abs().mean())
                max_grads.append(p.grad.abs().max())
                print(f'layer: {n}, ave grad: {ave_grads[-1]:.8f}, max grad: {max_grads[-1]:.8f}.')
            else:
                print(f'layer: {n} has no grad.')
                # Sentinel bar of -1 marks parameters without a gradient.
                ave_grads.append(-1.)
                max_grads.append(-1.)
    # Overlay max (blue) and average (green) gradient magnitudes per layer.
    plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="b")
    plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="g")
    plt.hlines(0, 0, len(ave_grads)+1, lw=2, color="k")
    plt.xticks(range(0, len(ave_grads), 1), layers, rotation="vertical")
    plt.xlim(left=0, right=len(ave_grads))
    plt.ylim(bottom=-0.001, top=0.02)  # zoom in on the lower gradient regions
    plt.xlabel("Layers")
    plt.ylabel("average gradient")
    plt.title("Gradient flow")
    plt.savefig('grad.local.png', bbox_inches='tight')
# ---------- benchmark ---------- #
@contextmanager
def elapsed_timer():
    """Context manager yielding a zero-arg callable reporting elapsed seconds.

    Inside the `with` block the callable returns the running elapsed time;
    after the block exits it is frozen at the total duration.
    """
    t0 = default_timer()
    measure = lambda: default_timer() - t0
    yield lambda: measure()
    t1 = default_timer()
    measure = lambda: t1 - t0
# ---------- checkpoint handling ---------- #
def load_parallel_state_dict(state_dict):
    """Strip the 'module.' prefix that nn.DataParallel adds to parameter keys.

    Keys without the prefix are kept unchanged. (Previously the first seven
    characters of every key were dropped unconditionally, which mangled keys
    saved from a non-parallel model.)

    Args:
        state_dict: mapping of parameter name -> tensor.

    Returns:
        new_state_dict: OrderedDict with cleaned keys, insertion order kept.
    """
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    prefix = 'module.'
    for k, v in state_dict.items():
        name = k[len(prefix):] if k.startswith(prefix) else k
        new_state_dict[name] = v
    return new_state_dict
def save_checkpoint(path, **kwargs):
    """Serialize all keyword arguments as a single checkpoint dict at *path*."""
    state = dict(kwargs)
    torch.save(state, path)
def load_checkpoint(path, state_dict_to_load=None, from_parallel=False):
    """Load a checkpoint dict of state-dicts from *path*.

    Args:
        path :str: path of checkpoint file.
        state_dict_to_load :[]: keys of states expected in the checkpoint;
            a mismatch is only warned about, the checkpoint is returned
            either way. Set it to None to skip the check.
        from_parallel: if True, strip DataParallel 'module.' prefixes from
            the 'model' entry.

    Returns:
        checkpoint :dict of state_dicts:
    """
    checkpoint = torch.load(path)
    if from_parallel:
        checkpoint['model'] = load_parallel_state_dict(checkpoint['model'])
    if state_dict_to_load is not None:
        if set(state_dict_to_load) != set(list(checkpoint.keys())):
            logging.warning(f'Checkpoint key mismatch. '
                            f'Requested {set(state_dict_to_load)}, found {set(list(checkpoint.keys()))}.')
    return checkpoint
def prepare_train(model, optimizer, lr_scheduler, args, **kwargs):
    """Do the dirty job of loading model/model weights, optimizer and lr_scheduler states from saved state-dicts.
    If args.from_model is set, the states will be fully recovered.
    If args.load_weight_from is set instead, only model weight will be loaded. Optimizer and lr_scheduler will not be loaded.
    Args:
        model, optimizer, lr_scheduler
        args: argument returned by init()
        If args.finetune is set:
            kwargs['finetune_old_head'] :str: attribute name of the model head that is to be replaced
            kwargs['finetune_new_head'] :torch.nn.Module: new head that will be appended to model
    Returns:
        model, optimizer, lr_scheduler
    """
    if args.from_model:
        # Full resume: model + optimizer + lr_scheduler (+ epoch counter).
        state_dict = load_checkpoint(args.from_model)
        if 'checkpoint_epoch' in state_dict.keys():
            args.start_epoch = state_dict['checkpoint_epoch'] + 1
        if 'model' in state_dict.keys():
            if not args.parallel:
                model.load_state_dict(state_dict['model'])
            else:
                model.load_state_dict(
                    load_parallel_state_dict(state_dict['model']))
        else:
            # Checkpoint is a bare model state_dict (no wrapper keys).
            if not args.parallel:
                model.load_state_dict(state_dict)
            else:
                model.load_state_dict(load_parallel_state_dict(state_dict))
        # if --finetune is set, the head is reset to a new 1x1 conv layer
        if args.finetune:
            setattr(model, kwargs['finetune_old_head'], kwargs['finetune_new_head'])
        if 'optimizer' in state_dict.keys():
            optimizer.load_state_dict(state_dict['optimizer'])
        if 'initial_lr' in state_dict.keys():
            optimizer.param_groups[0]['initial_lr'] = state_dict['initial_lr']
        else:
            optimizer.param_groups[0]['initial_lr'] = args.lr
        if 'lr_scheduler' in state_dict.keys():
            lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
    if args.load_weight_from and not args.from_model:
        # Weights-only load: optimizer/lr_scheduler state intentionally skipped.
        state_dict = load_checkpoint(args.load_weight_from)
        if 'model' in state_dict.keys():
            if not args.parallel:
                model.load_state_dict(state_dict['model'])
            else:
                model.load_state_dict(
                    load_parallel_state_dict(state_dict['model']))
        else:
            if not args.parallel:
                model.load_state_dict(state_dict)
            else:
                model.load_state_dict(load_parallel_state_dict(state_dict))
    if args.parallel:
        model = torch.nn.DataParallel(model)
    model = model.to(args.device)
    return model, optimizer, lr_scheduler
# ---------- data handling ---------- #
def longtensor_to_one_hot(labels, num_classes):
    """Convert integer-encoded labels to a one-hot encoding.

    Args:
        labels: LongTensor of shape [batch_size, 1].
        num_classes: total number of classes.

    Returns:
        FloatTensor of shape [batch_size, num_classes] with a 1 per row.
    """
    one_hot = torch.zeros(labels.shape[0], num_classes)
    one_hot.scatter_(1, labels, 1)
    return one_hot
# ---------- training ---------- #
class EarlyStop:
    """Early-stopping helper tracking best loss/accuracy and a patience budget.

    step() returns True once patience is exhausted, False when the epoch
    counted as an improvement, and None (falsy) when patience merely ticked
    down.
    """
    def __init__(self, patience: int, verbose: bool = True):
        self.patience = patience
        self.init_patience = patience
        self.verbose = verbose
        self.lowest_loss = 9999999.999
        self.highest_acc = 0.0

    def step(self, loss=None, acc=None, criterion=lambda x1, x2: x1 or x2):
        """Record one epoch's metrics; criterion combines the two
        improvement flags (default: either one suffices)."""
        if loss is None:
            loss, improved_loss = self.lowest_loss, True
        else:
            # Require a >1% relative improvement to count as better.
            improved_loss = (loss < self.lowest_loss) and ((self.lowest_loss - loss) / self.lowest_loss > 0.01)
        if acc is None:
            acc, improved_acc = self.highest_acc, True
        else:
            improved_acc = acc > self.highest_acc
        if improved_loss:
            self.lowest_loss = loss
        if improved_acc:
            self.highest_acc = acc
        if criterion(improved_loss, improved_acc):
            self.patience = self.init_patience
            if self.verbose:
                logging.getLogger(myself()).debug(
                    'Remaining patience: {}'.format(self.patience))
            return False
        self.patience -= 1
        if self.verbose:
            logging.getLogger(myself()).debug(
                'Remaining patience: {}'.format(self.patience))
        if self.patience < 0:
            if self.verbose:
                logging.getLogger(myself()).warning('Ran out of patience.')
            return True
        # No improvement but patience remains: implicit None (falsy), as before.
class ShouldSaveModel:
    """Track the best epoch so far and decide when a checkpoint is warranted."""

    def __init__(self, init_step=-1):
        """
        Args:
            init_step :int: start_epoch - 1
        """
        self.lowest_loss = 999999.999
        self.highest_acc = 0.0
        self.current_step = init_step
        self.best_step = init_step

    def step(self, loss=None, acc=None, criterion=lambda x1, x2: x1 or x2):
        """
        Advance one epoch and decide whether the model should be saved.
        Args:
            loss :float: loss after current epoch.
            acc :float: acc after current epoch.
            criterion :callable: combines the two improvement flags into a bool.
        Returns:
            :bool: whether this model should be saved.
        """
        self.current_step += 1
        if loss is None:
            loss, improved_loss = self.lowest_loss, True
        else:
            # Require a >1% relative improvement to count as better.
            improved_loss = (loss < self.lowest_loss) and ((self.lowest_loss - loss) / self.lowest_loss > 0.01)
        if acc is None:
            acc, improved_acc = self.highest_acc, True
        else:
            improved_acc = acc > self.highest_acc
        if improved_loss:
            self.lowest_loss = loss
        if improved_acc:
            self.highest_acc = acc
        if not criterion(improved_loss, improved_acc):
            return False
        logging.getLogger(myself()).info(
            f'New model: epoch: {self.current_step}, highest acc: {acc:.4}, lowest loss: {loss:.4}.')
        self.best_step = self.current_step
        return True
class RunningAverage:
    """Running average over a sliding window of the most recent values."""

    def __init__(self, window_size, initial_step=0):
        # Circular buffer holding the last `window_size` values.
        self.data = np.zeros([window_size, 1])
        self.window_size = window_size
        self.step = initial_step
        self.idx = -1

    def value(self):
        """Average of the values currently held in the window.

        Fixes two defects: the original divided by the total step count,
        skewing the average once the window wrapped, and at step == 0 it
        returned NaN (numpy float division never raises ZeroDivisionError,
        so the except clause was dead code).
        """
        if self.step == 0:
            return 0
        return self.data.sum() / min(self.step, self.window_size)

    def add(self, d):
        """Insert a value into the circular buffer and return the buffer
        mean (zero-padded until the window fills, as before)."""
        self.idx = (self.idx + 1) % self.window_size
        self.data[self.idx] = d
        self.step += 1
        return self.data.mean()
# ---------- environment setup and logging ---------- #
def myself():
    """Return the name of the calling function (used for logger naming)."""
    caller = inspect.currentframe().f_back
    return caller.f_code.co_name
def get_usable_gpu(threshold=2048, gpu_id_remap=None):
    """Find a usable gpu
    Args:
        threshold :int: required GPU free memory (units as reported by
            nvidia-smi's fb_memory_usage 'free' field — presumably MiB).
        gpu_id_remap :[int]: in cases where GPU IDs mess up, use a remap
    Returns:
        GPU ID :int:, or
        :None: if no GPU is found
    """
    gpu_id = None
    try:
        # Query per-GPU free memory by parsing `nvidia-smi -x -q` XML output.
        gpu_info = xmltodict.parse(subprocess.check_output(
            ['nvidia-smi', '-x', '-q']))['nvidia_smi_log']
        free_mem = []
        # Single-GPU machines yield a dict here instead of a list of entries.
        if type(gpu_info['gpu']) is list:
            for gpu in gpu_info['gpu']:
                free_mem.append(int(gpu['fb_memory_usage']['free'].split()[0]))
        else:
            free_mem.append(
                int(gpu_info['gpu']['fb_memory_usage']['free'].split()[0]))
        # Pick the GPU with the most free memory.
        gpu_id = np.argmax(free_mem)
        best_memory = free_mem[gpu_id]
        if gpu_id_remap:
            gpu_id = gpu_id_remap[gpu_id]
        if best_memory < threshold:
            gpu_id = None
    except Exception as e:
        # Best-effort: nvidia-smi missing or malformed output -> report no GPU.
        print(e)
    return gpu_id
def wait_gpu(req_mem=8000, id_map=None):
    """Block until a GPU with at least req_mem free memory is available,
    then claim it by setting CUDA_VISIBLE_DEVICES.

    The random initial sleep (0-30 s) staggers concurrently launched jobs
    so they are less likely to grab the same GPU.
    """
    wait_time = int(random.random() * 30)
    time.sleep(wait_time)
    while True:
        gpu_id = get_usable_gpu(req_mem, id_map)
        if gpu_id is not None:
            break
        time.sleep(30)  # poll every 30 seconds
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
def config_logger(log_file=None):
    """Configure root logging: DEBUG to *log_file* (if given) plus INFO to
    the console; without a file, DEBUG goes straight to stdout."""
    if log_file is not None:
        # File handler captures everything at DEBUG level.
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s [%(levelname)-8.8s] (%(name)-8.8s %(filename)15.15s:%(lineno)5.5d) %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            filename=log_file,
                            filemode='w')
        # define a Handler which writes INFO messages or higher to the sys.stderr
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        # set a format which is simpler for console use
        formatter = logging.Formatter(
            '%(asctime)s [%(levelname)-8.8s] (%(name)-8.8s %(filename)15.15s:%(lineno)5.5d) %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S')
        # tell the handler to use this format
        console.setFormatter(formatter)
        # add the handler to the root logger
        logging.getLogger('').addHandler(console)
    else:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s [%(levelname)-8.8s] (%(name)-8.8s %(filename)15.15s:%(lineno)5.5d) %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            stream=sys.stdout)
def handle_exception(exc_type, exc_value, exc_traceback):
    """Global excepthook: let Ctrl-C pass through to the default handler,
    log every other uncaught exception as critical."""
    if not issubclass(exc_type, KeyboardInterrupt):
        logging.getLogger('').critical("Uncaught exception",
                                       exc_info=(exc_type, exc_value, exc_traceback))
        return
    sys.__excepthook__(exc_type, exc_value, exc_traceback)

# Install as the process-wide hook so uncaught exceptions land in the log.
sys.excepthook = handle_exception
def append_test_args(p):
    """Register the command-line flags used by model evaluation scripts.

    Args:
        p :argparse.ArgumentParser object: parser to extend.

    Returns:
        The same parser with test-time arguments attached.
    """
    add = p.add_argument
    add('--from_model', '--from-model',
        nargs='+', type=str, required=True)
    add('--cuda', action='store_true', default=False)
    add('--parallel', action='store_true', default=False)
    add('--num_workers', '--num-workers', type=int, default=2)
    add('--batch_size', '--batch-size', type=int, default=32)
    add('--dataset', choices=['mitstates', 'ut-zap50k'], required=True,
        help='Dataset for training and testing.')
    return p
def create_parser(user_param=None):
    """Build the base argument parser for environment/training setup.

    Args:
        user_param :callable: takes and returns an ArgumentParser; used to
            append caller-specific parameters.

    Returns:
        p :argparse.ArgumentParser:

    Raises:
        ValueError: if user_param does not return an ArgumentParser.
    """
    p = argparse.ArgumentParser(description='input arguments.')
    add = p.add_argument
    # Logging / bookkeeping.
    add('--no-pbar', action='store_true',
        default=False, help='Subpress progress bar.')
    add('--log_dir', default=None)
    add('--debug_mode', '--debug-mode', action='store_true', default=False)
    add('--summary_to', type=str, default=None)
    add('--uuid', default=None,
        help='UUID of the model. Will be generated automatically if unspecified.')
    # Device selection.
    add('--cuda', action='store_true', default=False,
        help='Flag for cuda. Will be automatically determined if unspecified.')
    add('--device', choices=['cpu', 'cuda'], default='cuda',
        help='Flag for cuda. Will be automatically determined if unspecified.')
    add('--parallel', action='store_true', default=False,
        help='Flag for parallel.')
    # Training schedule and checkpointing.
    add('--start_epoch', type=int, default=0)
    add('--max_epoch', type=int, default=100)
    add('--from_model', '--from-model', type=str, default=None,
        help='Load model, optimizer, lr_scheduler state from path.')
    add('--finetune', action='store_true', default=False)
    add('--load_weight_from', '--load-weight-from', type=str, default=None,
        help='Load model state from path. This will invalidate --finetune flag.')
    add('--seed', type=int, default=0)
    add('--test_only', '--test-only', action='store_true', default=False,
        help='Disable training. Model will only be tested.')
    add('--save_model_to', type=str, default='./snapshots/')
    add('--patience', type=int, default=10,
        help='Number of epochs to continue when test acc stagnants.')
    if user_param:
        p = user_param(p)
        # Keep the original strict type-equality check (not isinstance).
        if type(p) != argparse.ArgumentParser:
            raise ValueError(
                f'user_param must return an ArgumentParser object, found {type(p)} instead.')
    return p
def worker_init_fn_seed(args):
    """Build a DataLoader worker_init_fn that seeds numpy/random/torch
    deterministically per worker (worker index + args.seed)."""
    def worker_init_fn(worker_id):
        worker_seed = worker_id + args.seed
        np.random.seed(worker_seed)
        random.seed(worker_seed)
        torch.manual_seed(worker_seed)
    return worker_init_fn
def set_randomness(seed):
    """Seed every RNG in use (numpy, random, torch, CUDA) and force
    deterministic cuDNN behavior for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; disables autotuned (non-reproducible) ones.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def init(user_param=None, user_args_modifier=None):
    """Parse and return arguments, prepare the environment. Set up logging.

    Builds the argument parser (optionally extended by ``user_param``),
    parses ``sys.argv``, seeds all RNGs, assigns a unique ``model_id``,
    creates the snapshot/log directories (unless --test_only) and
    configures logging. Shells out to ``hostname``, ``git`` and ``date``,
    so it expects a POSIX environment inside a git checkout.

    Args:
        user_param :callable: append user parameters to the argument parser.
        user_args_modifier :callable: override parsed arguments.
    Returns:
        args :Namespace: of arguments
    """
    # parse input arguments
    parser = create_parser(user_param)
    args = parser.parse_args()
    # detect CUDA
    # NOTE(review): the parser declares --cuda with default=False, so this
    # `is None` auto-detect branch appears unreachable — confirm intent.
    args.cuda = torch.cuda.is_available() if args.cuda is None else args.cuda
    # detect my hostname and gpu id
    hostname = subprocess.check_output(
        "hostname", shell=True).decode("utf-8")[:-1]
    git_commit = subprocess.check_output(
        ['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8')[:-1]
    gpu_id = os.getenv("CUDA_VISIBLE_DEVICES") if args.cuda else 'null'
    # [:-7] strips the trailing timezone offset from `date -Iminutes`.
    date_time = subprocess.check_output(['date', '-Iminutes']).decode('utf-8')[:-7]
    args.git_commit = git_commit
    # randomness control
    set_randomness(args.seed)
    # model ID
    # if debug_mode flag is set, all logs will be saved to debug/model_id folder,
    # otherwise will be saved to runs/model_id folder
    if not args.debug_mode:
        args.model_id = args.uuid if args.uuid is not None else f'{str(uuid.uuid4().hex)[:8]}_{hostname}_{gpu_id}_{os.getpid()}_{git_commit}_{date_time}'
        args.summary_to = args.summary_to if args.summary_to is not None else f'./runs/{args.model_id}/'
    else:
        args.model_id = f'debug_{hostname}_{gpu_id}_{os.getpid()}_{git_commit}'
        args.summary_to = 'debug'
    # create model save path
    if not args.test_only:
        # create logger
        # NOTE(review): args.log_dir is not added by the parser options
        # visible here — presumably defined earlier or via user_param; verify.
        if args.log_dir is None:
            args.log_dir = os.path.join(args.save_model_to, args.model_id)
        # Directories are created parent-first; os.mkdir (not makedirs)
        # requires each parent to exist already.
        if not os.path.exists(args.save_model_to):
            os.mkdir(args.save_model_to)
        if not os.path.exists(args.log_dir):
            os.mkdir(args.log_dir)
        if not os.path.exists(os.path.join(args.save_model_to, args.model_id)):
            os.mkdir(os.path.join(args.save_model_to, args.model_id))
        config_logger(os.path.join(args.log_dir, args.model_id + '.log'))
        # log
        logging.getLogger(myself()).info(
            f'Model {args.model_id}, running in {sys.argv[0]}, code revision {git_commit}')
        for arg in vars(args):
            logging.getLogger(myself()).debug(
                f'{arg:<30s} = {str(getattr(args, arg)):<30s}')
    else:
        # Test-only runs skip directory/log setup and just echo the config.
        print(f'Model {args.model_id}, running with {sys.argv[0]}')
        for arg in vars(args):
            print(f'{arg:<30s} = {str(getattr(args, arg)):<30s}')
    if user_args_modifier:
        args = user_args_modifier(args)
        if not args:
            raise ValueError('user_args_modifier must return args, not None.')
    return args
# ---------- Debug use only ---------- #
if __name__ == '__main__':
    # Smoke test when the module is run directly; get_usable_gpu() is
    # presumably defined earlier in this file (not visible in this chunk).
    get_usable_gpu()
    pass
| 19,599 | 34.571688 | 153 | py |
IVR | IVR-main/actor.py | from typing import Tuple
import jax
import jax.numpy as jnp
from common import Batch, InfoDict, Model, Params, PRNGKey
def update_actor(key: PRNGKey, actor: Model, critic: Model, value: Model,
                 batch: Batch, alpha: float, alg: str) -> Tuple[Model, InfoDict]:
    """One weighted-regression step on the actor.

    Weights each log-likelihood term by an advantage-derived coefficient:
    'SQL' uses the advantage clipped below at zero; 'EQL' uses a clipped
    exponential of the advantage scaled by ``alpha``.

    Args:
        key: PRNG key for the actor's dropout.
        actor: Policy model being updated.
        critic: (Target) critic providing Q(s, a).
        value: Value model providing V(s).
        batch: Transition minibatch.
        alpha: Temperature used by the EQL weighting.
        alg: 'SQL' or 'EQL'.

    Returns:
        (updated actor model, logging dict).
    """
    state_value = value(batch.observations)
    q_one, q_two = critic(batch.observations, batch.actions)
    advantage = jnp.minimum(q_one, q_two) - state_value

    if alg == 'SQL':
        # Sparse weighting: only positive-advantage actions contribute.
        weight = jnp.maximum(advantage, 0)
    elif alg == 'EQL':
        # Exponential weighting, clipped above to avoid blow-up.
        weight = jnp.clip(jnp.exp(10 * advantage / alpha), 0, 100.)

    def actor_loss_fn(actor_params: Params) -> Tuple[jnp.ndarray, InfoDict]:
        dist = actor.apply({'params': actor_params},
                           batch.observations,
                           training=True,
                           rngs={'dropout': key})
        log_likelihood = dist.log_prob(batch.actions)
        loss = -(weight * log_likelihood).mean()
        return loss, {'actor_loss': loss}

    return actor.apply_gradient(actor_loss_fn)
IVR | IVR-main/learner.py | """Implementations of algorithms for continuous control."""
from typing import Optional, Sequence, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import optax
import policy
import value_net
from common import Batch, InfoDict, Model, PRNGKey
from actor import update_actor
from critic import update_q, update_v
def target_update(critic: Model, target_critic: Model, tau: float) -> Model:
    """Polyak-average the online critic parameters into the target critic.

    Each target parameter becomes ``tau * online + (1 - tau) * target``.
    """
    blended_params = jax.tree_util.tree_map(
        lambda online, target: online * tau + target * (1 - tau),
        critic.params, target_critic.params)
    return target_critic.replace(params=blended_params)
def _make_update_jit(alg: str):
    """Build the jitted one-step training update for a fixed algorithm.

    The original ``_update_jit_sql`` / ``_update_jit_eql`` bodies were
    byte-for-byte identical except for the ``alg`` string forwarded to
    ``update_v`` / ``update_actor``; this factory removes that duplication
    while preserving both public names and their jitted behavior (``alg``
    is a Python constant baked in at trace time).

    Args:
        alg: 'SQL' or 'EQL'.

    Returns:
        A ``jax.jit``-compiled update function with the original signature.
    """
    @jax.jit
    def _update_jit(
        rng: PRNGKey, actor: Model, critic: Model,
        value: Model, target_critic: Model, batch: Batch, discount: float,
        tau: float, alpha: float
    ) -> Tuple[PRNGKey, Model, Model, Model, Model, InfoDict]:
        # (Annotation fixed: the original declared one Model too many.)
        # 1) Fit V(s) to the target critic's Q-values.
        new_value, value_info = update_v(target_critic, value, batch, alpha,
                                         alg=alg)
        key, rng = jax.random.split(rng)
        # 2) Advantage-weighted regression step on the actor.
        new_actor, actor_info = update_actor(key, actor, target_critic,
                                             new_value, batch, alpha, alg=alg)
        # 3) TD step on the critic towards r + discount * mask * V(s').
        new_critic, critic_info = update_q(critic, new_value, batch, discount)
        # 4) Polyak update of the target critic.
        new_target_critic = target_update(new_critic, target_critic, tau)
        return rng, new_actor, new_critic, new_value, new_target_critic, {
            **critic_info,
            **value_info,
            **actor_info
        }
    return _update_jit


_update_jit_sql = _make_update_jit('SQL')
_update_jit_eql = _make_update_jit('EQL')
class Learner(object):
    """Offline RL learner coupling an actor, a double critic, a value net
    and a Polyak-averaged target critic, trained with either the 'SQL' or
    'EQL' objective (selected via ``alg``).
    """
    def __init__(self,
                 seed: int,
                 observations: jnp.ndarray,
                 actions: jnp.ndarray,
                 actor_lr: float = 3e-4,
                 value_lr: float = 3e-4,
                 critic_lr: float = 3e-4,
                 hidden_dims: Sequence[int] = (256, 256),
                 discount: float = 0.99,
                 tau: float = 0.005,
                 alpha: float = 0.1,
                 dropout_rate: Optional[float] = None,
                 value_dropout_rate: Optional[float] = None,
                 layernorm: bool = False,
                 max_steps: Optional[int] = None,
                 max_clip: Optional[int] = None,
                 mix_dataset: Optional[str] = None,
                 alg: Optional[str] = None,
                 opt_decay_schedule: str = "cosine"):
        """Initialize all networks and optimizers.

        ``observations`` / ``actions`` are sample arrays used only to infer
        shapes when initializing network parameters. ``mix_dataset`` is
        accepted but unused here. NOTE(review): the original docstring
        referenced Soft-Actor-Critic (arXiv:1801.01290), which does not
        match this SQL/EQL implementation.
        """
        # self.expectile = expectile
        self.tau = tau
        self.discount = discount
        self.alpha = alpha
        self.max_clip = max_clip
        self.alg = alg
        rng = jax.random.PRNGKey(seed)
        rng, actor_key, critic_key, value_key = jax.random.split(rng, 4)
        action_dim = actions.shape[-1]
        # Gaussian policy with tanh'd means (not a tanh-squashed dist) and a
        # state-independent log-std clipped below at -5.
        actor_def = policy.NormalTanhPolicy(hidden_dims,
                                            action_dim,
                                            log_std_scale=1e-3,
                                            log_std_min=-5.0,
                                            dropout_rate=dropout_rate,
                                            state_dependent_std=False,
                                            tanh_squash_distribution=False)
        if opt_decay_schedule == "cosine":
            # Negative learning rate: scale_by_adam produces ascent
            # directions, so the schedule supplies the descent sign.
            schedule_fn = optax.cosine_decay_schedule(-actor_lr, max_steps)
            optimiser = optax.chain(optax.scale_by_adam(),
                                    optax.scale_by_schedule(schedule_fn))
        else:
            optimiser = optax.adam(learning_rate=actor_lr)
        actor = Model.create(actor_def,
                             inputs=[actor_key, observations],
                             tx=optimiser)
        critic_def = value_net.DoubleCritic(hidden_dims)
        critic = Model.create(critic_def,
                              inputs=[critic_key, observations, actions],
                              tx=optax.adam(learning_rate=critic_lr))
        value_def = value_net.ValueCritic(hidden_dims, layer_norm=layernorm, dropout_rate=value_dropout_rate)
        value = Model.create(value_def,
                             inputs=[value_key, observations],
                             tx=optax.adam(learning_rate=value_lr))
        # Target critic starts from the same init key (identical weights)
        # and carries no optimizer: it is only updated via Polyak averaging.
        target_critic = Model.create(
            critic_def, inputs=[critic_key, observations, actions])
        self.actor = actor
        self.critic = critic
        self.value = value
        self.target_critic = target_critic
        self.rng = rng
    def sample_actions(self,
                       observations: np.ndarray,
                       temperature: float = 1.0) -> jnp.ndarray:
        """Sample actions from the current policy, clipped to [-1, 1].

        ``temperature`` scales the policy std (0.0 gives the mean action).
        Advances ``self.rng`` as a side effect.
        """
        rng, actions = policy.sample_actions(self.rng, self.actor.apply_fn,
                                             self.actor.params, observations,
                                             temperature)
        self.rng = rng
        actions = np.asarray(actions)
        return np.clip(actions, -1, 1)
    def update(self, batch: Batch) -> InfoDict:
        """Run one jitted training step and swap in the new models.

        Dispatches on ``self.alg`` in Python because a string cannot be a
        traced JAX argument. NOTE(review): any ``alg`` other than
        'SQL'/'EQL' falls through and raises NameError on ``new_rng``.
        """
        # type <class 'str'> is not a valid JAX type.
        if self.alg == 'SQL':
            new_rng, new_actor, new_critic, new_value, new_target_critic, info = _update_jit_sql(
                self.rng, self.actor, self.critic, self.value, self.target_critic,
                batch, self.discount, self.tau, self.alpha)
        elif self.alg == 'EQL':
            new_rng, new_actor, new_critic, new_value, new_target_critic, info = _update_jit_eql(
                self.rng, self.actor, self.critic, self.value, self.target_critic,
                batch, self.discount, self.tau, self.alpha)
        self.rng = new_rng
        self.actor = new_actor
        self.critic = new_critic
        self.value = new_value
        self.target_critic = new_target_critic
        return info
| 6,552 | 37.547059 | 109 | py |
IVR | IVR-main/policy.py | import functools
from typing import Optional, Sequence, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
from common import MLP, Params, PRNGKey, default_init
LOG_STD_MIN = -10.0
LOG_STD_MAX = 2.0
class NormalTanhPolicy(nn.Module):
    """Gaussian policy head, optionally tanh-squashed.

    Attributes:
        hidden_dims: Sizes of the MLP trunk layers.
        action_dim: Dimensionality of the action space.
        state_dependent_std: If True, the log-std is predicted from the
            state; otherwise it is a free, state-independent parameter.
        dropout_rate: Optional dropout rate inside the MLP trunk.
        log_std_scale: Kernel-init scale for the state-dependent log-std head.
        log_std_min / log_std_max: Clipping bounds for the log-std; when
            None they fall back to LOG_STD_MIN / LOG_STD_MAX.
        tanh_squash_distribution: If True, wrap the Gaussian in a Tanh
            bijector; otherwise only the mean is tanh'd.
    """
    hidden_dims: Sequence[int]
    action_dim: int
    state_dependent_std: bool = True
    dropout_rate: Optional[float] = None
    log_std_scale: float = 1.0
    log_std_min: Optional[float] = None
    log_std_max: Optional[float] = None
    tanh_squash_distribution: bool = True
    @nn.compact
    def __call__(self,
                 observations: jnp.ndarray,
                 temperature: float = 1.0,
                 training: bool = False) -> tfd.Distribution:
        outputs = MLP(self.hidden_dims,
                      activate_final=True,
                      dropout_rate=self.dropout_rate)(observations,
                                                      training=training)
        means = nn.Dense(self.action_dim, kernel_init=default_init())(outputs)
        if self.state_dependent_std:
            log_stds = nn.Dense(self.action_dim,
                                kernel_init=default_init(
                                    self.log_std_scale))(outputs)
        else:
            log_stds = self.param('log_stds', nn.initializers.zeros,
                                  (self.action_dim, ))
        # BUGFIX: the previous `self.log_std_min or LOG_STD_MIN` treated an
        # explicitly configured 0.0 bound as "unset" (0.0 is falsy); compare
        # against None instead so any numeric bound is honored.
        log_std_min = LOG_STD_MIN if self.log_std_min is None else self.log_std_min
        log_std_max = LOG_STD_MAX if self.log_std_max is None else self.log_std_max
        log_stds = jnp.clip(log_stds, log_std_min, log_std_max)
        if not self.tanh_squash_distribution:
            means = nn.tanh(means)
        base_dist = tfd.MultivariateNormalDiag(loc=means,
                                               scale_diag=jnp.exp(log_stds) *
                                               temperature)
        if self.tanh_squash_distribution:
            return tfd.TransformedDistribution(distribution=base_dist,
                                               bijector=tfb.Tanh())
        else:
            return base_dist
@functools.partial(jax.jit, static_argnames=('actor_def', 'distribution'))
def _sample_actions(rng: PRNGKey,
                    actor_def: nn.Module,
                    actor_params: Params,
                    observations: np.ndarray,
                    temperature: float = 1.0) -> Tuple[PRNGKey, jnp.ndarray]:
    """Jitted helper: sample actions from the policy distribution.

    Returns the advanced RNG key together with the sampled actions.
    NOTE(review): 'distribution' in static_argnames is not a parameter of
    this function; some jax versions reject unknown names — verify.
    """
    policy_dist = actor_def.apply({'params': actor_params}, observations,
                                  temperature)
    rng, sample_key = jax.random.split(rng)
    actions = policy_dist.sample(seed=sample_key)
    return rng, actions
def sample_actions(rng: PRNGKey,
                   actor_def: nn.Module,
                   actor_params: Params,
                   observations: np.ndarray,
                   temperature: float = 1.0) -> Tuple[PRNGKey, jnp.ndarray]:
    """Public wrapper around the jitted ``_sample_actions`` helper."""
    return _sample_actions(rng=rng,
                           actor_def=actor_def,
                           actor_params=actor_params,
                           observations=observations,
                           temperature=temperature)
| 2,998 | 34.702381 | 79 | py |
IVR | IVR-main/common.py | import collections
import os
from typing import Any, Callable, Dict, Optional, Sequence, Tuple
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
import optax
# Minibatch of transitions shared across the code base. `masks` multiplies
# the bootstrap term in update_q, so 0.0 marks terminal transitions.
Batch = collections.namedtuple(
    'Batch',
    ['observations', 'actions', 'rewards', 'masks', 'next_observations', 'next_actions'])
def default_init(scale: Optional[float] = jnp.sqrt(2)):
    """Return an orthogonal kernel initializer with gain ``scale``."""
    return nn.initializers.orthogonal(scale)
# Shared type aliases. (Fixed: `PRNGKey = Any` was declared twice.)
PRNGKey = Any
Params = flax.core.FrozenDict[str, Any]
Shape = Sequence[int]
Dtype = Any  # this could be a real type?
InfoDict = Dict[str, float]
class MLP(nn.Module):
    """Multi-layer perceptron with optional LayerNorm and dropout.

    Attributes:
        hidden_dims: Output width of each Dense layer, in order.
        activations: Nonlinearity applied after each non-final layer.
        activate_final: If truthy, also norm/activate after the last layer.
        layer_norm: If True, apply LayerNorm before each activation.
        dropout_rate: If set and > 0, apply dropout after each activation
            (only active when called with training=True).
    """
    hidden_dims: Sequence[int]
    activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
    activate_final: bool = False
    layer_norm: bool = False
    dropout_rate: Optional[float] = None
    @nn.compact
    def __call__(self, x: jnp.ndarray, training: bool = False) -> jnp.ndarray:
        for i, size in enumerate(self.hidden_dims):
            x = nn.Dense(size, kernel_init=default_init())(x)
            # The final layer is left linear unless activate_final is set.
            if i + 1 < len(self.hidden_dims) or self.activate_final:
                if self.layer_norm:
                    x = nn.LayerNorm()(x)
                x = self.activations(x)
                if self.dropout_rate is not None and self.dropout_rate > 0:
                    x = nn.Dropout(rate=self.dropout_rate)(
                        x, deterministic=not training)
        return x
@flax.struct.dataclass
class Model:
    """Immutable bundle of a flax module, its parameters and optimizer state.

    Every mutating operation returns a new ``Model`` via ``self.replace``;
    ``tx``/``opt_state`` are None for networks that are never optimized
    directly (e.g. Polyak-updated target networks).
    """
    step: int
    apply_fn: nn.Module = flax.struct.field(pytree_node=False)
    params: Params
    tx: Optional[optax.GradientTransformation] = flax.struct.field(
        pytree_node=False)
    opt_state: Optional[optax.OptState] = None
    @classmethod
    def create(cls,
               model_def: nn.Module,
               inputs: Sequence[jnp.ndarray],
               tx: Optional[optax.GradientTransformation] = None) -> 'Model':
        """Initialize parameters from ``inputs`` (rng key + sample arrays)
        and optionally an optimizer state, returning a fresh Model."""
        variables = model_def.init(*inputs)
        # flax FrozenDict.pop returns (remaining_dict, popped_value).
        _, params = variables.pop('params')
        if tx is not None:
            opt_state = tx.init(params)
        else:
            opt_state = None
        return cls(step=1,
                   apply_fn=model_def,
                   params=params,
                   tx=tx,
                   opt_state=opt_state)
    def __call__(self, *args, **kwargs):
        # Convenience: apply the module with this model's own parameters.
        return self.apply_fn.apply({'params': self.params}, *args, **kwargs)
    def apply(self, *args, **kwargs):
        # Raw apply: caller supplies the variables dict (e.g. inside a
        # loss function differentiated w.r.t. candidate params).
        return self.apply_fn.apply(*args, **kwargs)
    def apply_gradient(self, loss_fn) -> Tuple['Model', Any]:
        """Take one optimizer step on ``loss_fn(params) -> (loss, aux)``.

        Returns (updated model, aux info dict). (Annotation fixed: the
        original declared the tuple order reversed.)
        """
        grad_fn = jax.grad(loss_fn, has_aux=True)
        grads, info = grad_fn(self.params)
        updates, new_opt_state = self.tx.update(grads, self.opt_state,
                                                self.params)
        new_params = optax.apply_updates(self.params, updates)
        return self.replace(step=self.step + 1,
                            params=new_params,
                            opt_state=new_opt_state), info
    def save(self, save_path: str):
        """Serialize only ``params`` (not optimizer state) to ``save_path``."""
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        with open(save_path, 'wb') as f:
            f.write(flax.serialization.to_bytes(self.params))
    def load(self, load_path: str) -> 'Model':
        """Return a copy of this model with params read from ``load_path``."""
        with open(load_path, 'rb') as f:
            params = flax.serialization.from_bytes(self.params, f.read())
        return self.replace(params=params)
| 3,375 | 31.152381 | 89 | py |
IVR | IVR-main/value_net.py | from typing import Callable, Sequence, Tuple, Optional
import jax.numpy as jnp
from flax import linen as nn
from common import MLP
class ValueCritic(nn.Module):
    """State-value network V(s): an MLP with a scalar output head."""
    hidden_dims: Sequence[int]
    layer_norm: bool = False
    dropout_rate: Optional[float] = 0.0
    @nn.compact
    def __call__(self, observations: jnp.ndarray) -> jnp.ndarray:
        # NOTE(review): no `training` flag is threaded through, so MLP's
        # dropout always runs deterministically here — confirm intent.
        critic = MLP((*self.hidden_dims, 1), layer_norm=self.layer_norm, dropout_rate=self.dropout_rate)(observations)
        # Drop the trailing singleton output dimension.
        return jnp.squeeze(critic, -1)
class Critic(nn.Module):
    """Single Q-network: maps a (state, action) pair to a scalar Q-value."""
    hidden_dims: Sequence[int]
    activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
    layer_norm: bool = False
    @nn.compact
    def __call__(self, observations: jnp.ndarray,
                 actions: jnp.ndarray) -> jnp.ndarray:
        state_action = jnp.concatenate([observations, actions], -1)
        q_values = MLP((*self.hidden_dims, 1),
                       layer_norm=self.layer_norm,
                       activations=self.activations)(state_action)
        # Drop the trailing singleton output dimension.
        return jnp.squeeze(q_values, -1)
class DoubleCritic(nn.Module):
    """Two independent Q-networks (clipped double-Q style).

    Returns both heads' outputs; callers take the elementwise minimum to
    reduce overestimation (see update_v / update_actor).
    """
    hidden_dims: Sequence[int]
    activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
    layer_norm: bool = False
    @nn.compact
    def __call__(self, observations: jnp.ndarray,
                 actions: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
        critic1 = Critic(self.hidden_dims,
                         activations=self.activations,
                         layer_norm=self.layer_norm)(observations, actions)
        critic2 = Critic(self.hidden_dims,
                         activations=self.activations,
                         layer_norm=self.layer_norm)(observations, actions)
        return critic1, critic2
| 1,717 | 32.686275 | 118 | py |
IVR | IVR-main/evaluation.py | from typing import Dict
import flax.linen as nn
import gym
import numpy as np
import d4rl
# from mingpt.utils import sample
# import atari_py
from collections import deque
import random
# import cv2
# import torch
def evaluate(env_name: str, agent: nn.Module, env: gym.Env,
             num_episodes: int) -> float:
    """Run greedy evaluation rollouts and return the D4RL-normalized score.

    Actions are sampled with temperature 0.0 (deterministic policy mean).

    Args:
        env_name: D4RL environment name used for score normalization.
        agent: Policy exposing ``sample_actions(observation, temperature)``.
        env: Gym environment to roll out in.
        num_episodes: Number of evaluation episodes to average over.

    Returns:
        Average episode return normalized by ``d4rl.get_normalized_score``
        and scaled to a 0-100 range. (Fixed: the previous annotation
        claimed ``Dict[str, float]``, but a scalar was always returned.)
    """
    episode_returns = []
    for _ in range(num_episodes):
        observation, done = env.reset(), False
        total_reward = 0.
        while not done:
            action = agent.sample_actions(observation, temperature=0.0)
            observation, reward, done, info = env.step(action)
            total_reward += reward
        episode_returns.append(total_reward)
    average_return = np.array(episode_returns).mean()
    return d4rl.get_normalized_score(env_name, average_return) * 100
| 906 | 26.484848 | 81 | py |
IVR | IVR-main/critic.py | from typing import Tuple
import jax.numpy as jnp
from common import PRNGKey
import policy
import jax
from common import Batch, InfoDict, Model, Params
def update_v(critic: Model, value: Model, batch: Batch,
             alpha: float, alg: str) -> Tuple[Model, InfoDict]:
    """One gradient step on the value network V(s).

    'SQL' minimizes a one-sided quadratic penalty on (Q - V) plus a linear
    V/alpha term; 'EQL' minimizes an exponential penalty, numerically
    stabilized by clipping and a stop-gradient max-shift.

    Args:
        critic: (Target) critic queried for Q(s, a); no gradient flows here.
        value: Value model being updated.
        batch: Transition minibatch.
        alpha: Regularization temperature.
        alg: 'SQL' or 'EQL'.

    Returns:
        (updated value model, logging dict).

    Raises:
        NotImplementedError: if ``alg`` is neither 'SQL' nor 'EQL'.
    """
    # Clipped double-Q to reduce overestimation.
    q1, q2 = critic(batch.observations, batch.actions)
    q = jnp.minimum(q1, q2)
    def value_loss_fn(value_params: Params) -> Tuple[jnp.ndarray, InfoDict]:
        v = value.apply({'params': value_params}, batch.observations)
        if alg == 'SQL':
            sp_term = (q - v) / (2 * alpha) + 1.0
            # Only samples with a positive sparsity term contribute.
            sp_weight = jnp.where(sp_term > 0, 1., 0.)
            value_loss = (sp_weight * (sp_term**2) + v / alpha).mean()
        elif alg == 'EQL':
            sp_term = (q - v) / alpha
            # Clip the exponent above, then shift by a (stop-gradient,
            # floored) per-batch max so exp() stays numerically stable.
            sp_term = jnp.minimum(sp_term, 5.0)
            max_sp_term = jnp.max(sp_term, axis=0)
            max_sp_term = jnp.where(max_sp_term < -1.0, -1.0, max_sp_term)
            max_sp_term = jax.lax.stop_gradient(max_sp_term)
            value_loss = (jnp.exp(sp_term - max_sp_term) + jnp.exp(-max_sp_term) * v / alpha).mean()
        else:
            raise NotImplementedError('please choose SQL or EQL')
        return value_loss, {
            'value_loss': value_loss,
            'v': v.mean(),
            'q-v': (q - v).mean(),
        }
    new_value, info = value.apply_gradient(value_loss_fn)
    return new_value, info
def update_q(critic: Model, value: Model,
             batch: Batch, discount: float) -> Tuple[Model, InfoDict]:
    """One TD step on the double critic.

    Regresses both critic heads towards the bootstrap target
    ``r + discount * mask * V(s')`` (no gradient flows into V).

    Returns:
        (updated critic model, logging dict).
    """
    bootstrap_v = value(batch.next_observations)
    target_q = batch.rewards + discount * batch.masks * bootstrap_v

    def critic_loss_fn(critic_params: Params) -> Tuple[jnp.ndarray, InfoDict]:
        q1, q2 = critic.apply({'params': critic_params}, batch.observations,
                              batch.actions)
        # Mean squared Bellman error summed over both heads.
        loss = ((q1 - target_q)**2 + (q2 - target_q)**2).mean()
        return loss, {
            'critic_loss': loss,
            'q1': q1.mean()
        }

    return critic.apply_gradient(critic_loss_fn)
id-reveal | id-reveal-main/network.py | import torch
def add_tensor_1d(x, y):
s1 = (y.shape[-1] - x.shape[-1]) // 2
e1 = s1 + x.shape[-1]
y = y[..., s1:e1]
if x.shape[1] > y.shape[1]:
d = [int(i) for i in y.shape]
d[1] = int(x.shape[1] - y.shape[1])
y = torch.cat((y, torch.zeros(d, dtype=y.dtype, device=y.device)), -3)
return x + y
def torch_nanmean_var(x, dim=None, keepdim=True):
pos = torch.isnan(x) == False
y = torch.where(pos, x, torch.zeros_like(x))
d = pos.sum(dim, keepdim=keepdim)
m = y.sum(dim, keepdim=keepdim) / d
v = (y ** 2).sum(dim, keepdim=keepdim) / d - (m ** 2)
return m, v
class GroupNanNorm(torch.nn.Module):
def __init__(self, num_groups: int, num_channels: int, eps: float = 1e-5, affine: bool = True) -> None:
super(GroupNanNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = torch.nn.Parameter(torch.Tensor(num_channels))
self.bias = torch.nn.Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
assert (self.num_channels % self.num_groups) == 0
def reset_parameters(self) -> None:
if self.affine:
torch.nn.modules.normalization.init.ones_(self.weight)
torch.nn.modules.normalization.init.zeros_(self.bias)
def forward(self, x: torch.Tensor) -> torch.Tensor:
shape = x.shape
assert shape[1] == self.num_channels
x = x.view((int(shape[0]), self.num_groups, int(shape[1]) // self.num_groups, -1))
m, v = torch_nanmean_var(x, (-2, -1), keepdim=True)
x = (x - m) * torch.rsqrt(v + self.eps)
x = x.view((int(shape[0]), int(shape[1]), -1))
if self.affine:
x = x * self.weight[None, :, None] + self.bias[None, :, None]
x = x.view(shape)
return x
def extra_repr(self) -> str:
return '{num_groups}, {num_channels}, eps={eps}, ' \
'affine={affine}'.format(**self.__dict__)
class ResidialBlock1D(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size, group_size=16, stride=1,
padding=0, dilation=1, residue=True, negative_slope=0.02, nangroup=True):
super(ResidialBlock1D, self).__init__()
self.residue = residue
self.conv = torch.nn.Conv1d(input_size, output_size, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=True)
if nangroup:
self.norm = GroupNanNorm(num_groups=output_size // group_size, num_channels=output_size, affine=True)
else:
self.norm = torch.nn.GroupNorm(num_groups=output_size // group_size, num_channels=output_size, affine=True)
self.act = torch.nn.LeakyReLU(negative_slope)
def forward(self, x):
y = self.norm(self.conv(x))
if self.residue:
y = add_tensor_1d(y, x)
return self.act(y)
def DeepNetwork1D(input_size=62, output_size=128, hidden_size=512,
kernels =[1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
dilations=[1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 4],
last_act=False, flag_residue_first=False, nangroup=True):
list_module = list()
for index in range(len(dilations)-1):
block_input_size = input_size if index == 0 else hidden_size
block_dilation = dilations[index]
block_kernel = kernels[index]
block_residue = flag_residue_first if index == 0 else True
module = ResidialBlock1D(block_input_size, hidden_size, kernel_size=block_kernel, group_size=16, stride=1,
padding=0, dilation=block_dilation, residue=block_residue, negative_slope=0.02, nangroup=nangroup)
list_module.append(module)
block_dilation = dilations[-1]
block_kernel = kernels[-1]
if last_act:
module = ResidialBlock1D(hidden_size, output_size, kernel_size=block_kernel, group_size=16, stride=1,
padding=0, dilation=block_dilation, residue=False, negative_slope=0.02, nangroup=nangroup)
list_module.append(module)
else:
module = torch.nn.Conv1d(hidden_size, output_size, block_kernel, stride=1, padding=0, dilation=block_dilation, bias=True)
list_module.append(module)
return torch.nn.Sequential(*list_module)
class IDreveal():
def __init__(self, time, device='cpu', weights_file='./model.th'):
self.time = time
self.device = device
self.network = DeepNetwork1D(62, 128, 512,
kernels =[1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
dilations=[1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 4],
last_act=False, flag_residue_first=False, nangroup=True).to(device)
dat = torch.load(weights_file, map_location=self.device)
self.norm = dat['norm']
self.network.load_state_dict(dat['network'])
self.network.eval()
def __call__(self, data, stride=1):
with torch.no_grad():
time = self.time
htime = (time-50)//2
data = torch.from_numpy(data)/self.norm
data = data.unfold(0, time, stride)
assert data.shape[1]==62
assert data.shape[2]==time
data = torch.split(data, 512, dim=0)
data = [self.network(x.to(self.device))[:,:,htime].cpu() for x in data]
data = torch.cat(data,0).numpy()
return data
| 5,638 | 40.463235 | 135 | py |
DeepMoji | DeepMoji-master/deepmoji/class_avg_finetuning.py | """ Class average finetuning functions. Before using any of these finetuning
functions, ensure that the model is set up with nb_classes=2.
"""
from __future__ import print_function
import sys
import uuid
import numpy as np
from os.path import dirname
from time import sleep
from keras.optimizers import Adam
from global_variables import (
FINETUNING_METHODS,
WEIGHTS_DIR)
from finetuning import (
freeze_layers,
sampling_generator,
finetuning_callbacks,
train_by_chain_thaw,
find_f1_threshold)
def relabel(y, current_label_nr, nb_classes):
""" Makes a binary classification for a specific class in a
multi-class dataset.
# Arguments:
y: Outputs to be relabelled.
current_label_nr: Current label number.
nb_classes: Total number of classes.
# Returns:
Relabelled outputs of a given multi-class dataset into a binary
classification dataset.
"""
# Handling binary classification
if nb_classes == 2 and len(y.shape) == 1:
return y
y_new = np.zeros(len(y))
y_cut = y[:, current_label_nr]
label_pos = np.where(y_cut == 1)[0]
y_new[label_pos] = 1
return y_new
def class_avg_finetune(model, texts, labels, nb_classes, batch_size,
method, epoch_size=5000,
nb_epochs=1000, error_checking=True,
verbose=True):
""" Compiles and finetunes the given model.
# Arguments:
model: Model to be finetuned
texts: List of three lists, containing tokenized inputs for training,
validation and testing (in that order).
labels: List of three lists, containing labels for training,
validation and testing (in that order).
nb_classes: Number of classes in the dataset.
batch_size: Batch size.
method: Finetuning method to be used. For available methods, see
FINETUNING_METHODS in global_variables.py. Note that the model
should be defined accordingly (see docstring for deepmoji_transfer())
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs. Doesn't matter much as early stopping is used.
error_checking: If set to True, warnings will be printed when the label
list has the wrong dimensions.
verbose: Verbosity flag.
# Returns:
Model after finetuning,
score after finetuning using the class average F1 metric.
"""
if method not in FINETUNING_METHODS:
raise ValueError('ERROR (class_avg_tune_trainable): '
'Invalid method parameter. '
'Available options: {}'.format(FINETUNING_METHODS))
(X_train, y_train) = (texts[0], labels[0])
(X_val, y_val) = (texts[1], labels[1])
(X_test, y_test) = (texts[2], labels[2])
checkpoint_path = '{}/deepmoji-checkpoint-{}.hdf5' \
.format(WEIGHTS_DIR, str(uuid.uuid4()))
f1_init_path = '{}/deepmoji-f1-init-{}.hdf5' \
.format(WEIGHTS_DIR, str(uuid.uuid4()))
# Check dimension of labels
if error_checking:
# Binary classification has two classes but one value
expected_shape = 1 if nb_classes == 2 else nb_classes
for ls in [y_train, y_val, y_test]:
if len(ls.shape) <= 1 or not ls.shape[1] == expected_shape:
print('WARNING (class_avg_tune_trainable): '
'The dimension of the provided '
'labels do not match the expected value. '
'Expected: {}, actual: {}'
.format(expected_shape, ls.shape[1]))
break
if method in ['last', 'new']:
lr = 0.001
elif method in ['full', 'chain-thaw']:
lr = 0.0001
loss = 'binary_crossentropy'
# Freeze layers if using last
if method == 'last':
model = freeze_layers(model, unfrozen_keyword='softmax')
# Compile model, for chain-thaw we compile it later (after freezing)
if method != 'chain-thaw':
adam = Adam(clipnorm=1, lr=lr)
model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
# Training
if verbose:
print('Method: {}'.format(method))
print('Classes: {}'.format(nb_classes))
if method == 'chain-thaw':
result = class_avg_chainthaw(model, nb_classes=nb_classes,
train=(X_train, y_train),
val=(X_val, y_val),
test=(X_test, y_test),
batch_size=batch_size, loss=loss,
epoch_size=epoch_size,
nb_epochs=nb_epochs,
checkpoint_weight_path=checkpoint_path,
f1_init_weight_path=f1_init_path,
verbose=verbose)
else:
result = class_avg_tune_trainable(model, nb_classes=nb_classes,
train=(X_train, y_train),
val=(X_val, y_val),
test=(X_test, y_test),
epoch_size=epoch_size,
nb_epochs=nb_epochs,
batch_size=batch_size,
init_weight_path=f1_init_path,
checkpoint_weight_path=checkpoint_path,
verbose=verbose)
return model, result
def prepare_labels(y_train, y_val, y_test, iter_i, nb_classes):
# Relabel into binary classification
y_train_new = relabel(y_train, iter_i, nb_classes)
y_val_new = relabel(y_val, iter_i, nb_classes)
y_test_new = relabel(y_test, iter_i, nb_classes)
return y_train_new, y_val_new, y_test_new
def prepare_generators(X_train, y_train_new, X_val, y_val_new, batch_size, epoch_size):
# Create sample generators
# Make a fixed validation set to avoid fluctuations in validation
train_gen = sampling_generator(X_train, y_train_new, batch_size,
upsample=False)
val_gen = sampling_generator(X_val, y_val_new,
epoch_size, upsample=False)
X_val_resamp, y_val_resamp = next(val_gen)
return train_gen, X_val_resamp, y_val_resamp
def class_avg_tune_trainable(model, nb_classes, train, val, test, epoch_size,
nb_epochs, batch_size, init_weight_path,
checkpoint_weight_path, patience=5,
verbose=True):
""" Finetunes the given model using the F1 measure.
# Arguments:
model: Model to be finetuned.
nb_classes: Number of classes in the given dataset.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
batch_size: Batch size.
init_weight_path: Filepath where weights will be initially saved before
training each class. This file will be rewritten by the function.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
verbose: Verbosity flag.
# Returns:
F1 score of the trained model
"""
total_f1 = 0
nb_iter = nb_classes if nb_classes > 2 else 1
# Unpack args
X_train, y_train = train
X_val, y_val = val
X_test, y_test = test
# Save and reload initial weights after running for
# each class to avoid learning across classes
model.save_weights(init_weight_path)
for i in range(nb_iter):
if verbose:
print('Iteration number {}/{}'.format(i + 1, nb_iter))
model.load_weights(init_weight_path, by_name=False)
y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
y_test, i, nb_classes)
train_gen, X_val_resamp, y_val_resamp = \
prepare_generators(X_train, y_train_new, X_val, y_val_new,
batch_size, epoch_size)
if verbose:
print("Training..")
callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose=2)
steps = int(epoch_size / batch_size)
model.fit_generator(train_gen, steps_per_epoch=steps,
max_q_size=2, epochs=nb_epochs,
validation_data=(X_val_resamp, y_val_resamp),
callbacks=callbacks, verbose=0)
# Reload the best weights found to avoid overfitting
# Wait a bit to allow proper closing of weights file
sleep(1)
model.load_weights(checkpoint_weight_path, by_name=False)
# Evaluate
y_pred_val = np.array(model.predict(X_val, batch_size=batch_size))
y_pred_test = np.array(model.predict(X_test, batch_size=batch_size))
f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
y_test_new, y_pred_test)
if verbose:
print('f1_test: {}'.format(f1_test))
print('best_t: {}'.format(best_t))
total_f1 += f1_test
return total_f1 / nb_iter
def class_avg_chainthaw(model, nb_classes, train, val, test, batch_size,
loss, epoch_size, nb_epochs, checkpoint_weight_path,
f1_init_weight_path, patience=5,
initial_lr=0.001, next_lr=0.0001,
seed=None, verbose=True):
""" Finetunes given model using chain-thaw and evaluates using F1.
For a dataset with multiple classes, the model is trained once for
each class, relabeling those classes into a binary classification task.
The result is an average of all F1 scores for each class.
# Arguments:
model: Model to be finetuned.
nb_classes: Number of classes in the given dataset.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
batch_size: Batch size.
loss: Loss function to be used during training.
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
f1_init_weight_path: Filepath where weights will be saved to and
reloaded from before training each class. This ensures that
each class is trained independently. This file will be rewritten.
initial_lr: Initial learning rate. Will only be used for the first
training step (i.e. the softmax layer)
next_lr: Learning rate for every subsequent step.
seed: Random number generator seed.
verbose: Verbosity flag.
# Returns:
Averaged F1 score.
"""
# Unpack args
X_train, y_train = train
X_val, y_val = val
X_test, y_test = test
total_f1 = 0
nb_iter = nb_classes if nb_classes > 2 else 1
model.save_weights(f1_init_weight_path)
for i in range(nb_iter):
if verbose:
print('Iteration number {}/{}'.format(i + 1, nb_iter))
model.load_weights(f1_init_weight_path, by_name=False)
y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
y_test, i, nb_classes)
train_gen, X_val_resamp, y_val_resamp = \
prepare_generators(X_train, y_train_new, X_val, y_val_new,
batch_size, epoch_size)
if verbose:
print("Training..")
callbacks = finetuning_callbacks(checkpoint_weight_path, patience=patience, verbose=2)
# Train using chain-thaw
train_by_chain_thaw(model=model, train_gen=train_gen,
val_data=(X_val_resamp, y_val_resamp),
loss=loss, callbacks=callbacks,
epoch_size=epoch_size, nb_epochs=nb_epochs,
checkpoint_weight_path=checkpoint_weight_path,
initial_lr=initial_lr, next_lr=next_lr,
batch_size=batch_size, verbose=verbose)
# Evaluate
y_pred_val = np.array(model.predict(X_val, batch_size=batch_size))
y_pred_test = np.array(model.predict(X_test, batch_size=batch_size))
f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
y_test_new, y_pred_test)
if verbose:
print('f1_test: {}'.format(f1_test))
print('best_t: {}'.format(best_t))
total_f1 += f1_test
return total_f1 / nb_iter
| 13,300 | 39.675841 | 94 | py |
DeepMoji | DeepMoji-master/deepmoji/model_def.py | """ Model definition functions and weight loading.
"""
from __future__ import print_function, division
from keras.models import Model, Sequential
from keras.layers.merge import concatenate
from keras.layers import Input, Bidirectional, Embedding, Dense, Dropout, SpatialDropout1D, LSTM, Activation
from keras.regularizers import L1L2
from attlayer import AttentionWeightedAverage
from global_variables import NB_TOKENS, NB_EMOJI_CLASSES
import numpy as np
from copy import deepcopy
from os.path import exists
import h5py
def deepmoji_feature_encoding(maxlen, weight_path, return_attention=False):
    """ Loads the pretrained DeepMoji model as a text feature encoder.

    The softmax layer is excluded, so the model outputs the penultimate
    feature vector, i.e. the emotional encoding of the input text.

    # Arguments:
        maxlen: Maximum length of a sentence (given in tokens).
        weight_path: Path to model weights to be loaded.
        return_attention: If True, also output the weight of each input
            token used for the prediction.

    # Returns:
        Pretrained model for encoding text into feature vectors.
    """
    encoder = deepmoji_architecture(nb_classes=None,
                                    nb_tokens=NB_TOKENS,
                                    maxlen=maxlen,
                                    feature_output=True,
                                    return_attention=return_attention)
    # Skip the softmax weights since the classification head is not built.
    load_specific_weights(encoder, weight_path, exclude_names=['softmax'])
    return encoder
def deepmoji_emojis(maxlen, weight_path, return_attention=False):
    """ Loads the pretrained DeepMoji model for emoji prediction.

    Unlike deepmoji_feature_encoding, the softmax output layer is kept,
    so the model outputs probabilities over the emoji classes.

    # Arguments:
        maxlen: Maximum length of a sentence (given in tokens).
        weight_path: Path to model weights to be loaded.
        return_attention: If True, also output the weight of each input
            token used for the prediction.

    # Returns:
        Pretrained model for predicting emoji probabilities from text.
    """
    classifier = deepmoji_architecture(nb_classes=NB_EMOJI_CLASSES,
                                       nb_tokens=NB_TOKENS,
                                       maxlen=maxlen,
                                       return_attention=return_attention)
    # Load every layer's weights, including the softmax head.
    classifier.load_weights(weight_path, by_name=False)
    return classifier
def deepmoji_transfer(nb_classes, maxlen, weight_path=None, extend_embedding=0,
                      embed_dropout_rate=0.25, final_dropout_rate=0.5,
                      embed_l2=1E-6):
    """ Loads the pretrained DeepMoji model for finetuning/transfer learning.
    Does not load weights for the softmax layer.

    Note that if you are planning to use class average F1 for evaluation,
    nb_classes should be set to 2 instead of the actual number of classes
    in the dataset, since binary classification will be performed on each
    class individually.

    Note that for the 'new' method, weight_path should be left as None.

    # Arguments:
        nb_classes: Number of classes in the dataset.
        maxlen: Maximum length of a sentence (given in tokens).
        weight_path: Path to model weights to be loaded.
        extend_embedding: Number of tokens that have been added to the
            vocabulary on top of NB_TOKENS. If this number is larger than 0,
            the embedding layer's dimensions are adjusted accordingly, with the
            additional weights being set to random values.
        embed_dropout_rate: Dropout rate for the embedding layer.
        final_dropout_rate: Dropout rate for the final Softmax layer.
        embed_l2: L2 regularization for the embedding layer.

    # Returns:
        Model with the given parameters.
    """
    transfer_model = deepmoji_architecture(
        nb_classes=nb_classes,
        nb_tokens=NB_TOKENS + extend_embedding,
        maxlen=maxlen,
        embed_dropout_rate=embed_dropout_rate,
        final_dropout_rate=final_dropout_rate,
        embed_l2=embed_l2)

    # For the 'new' finetuning method no pretrained weights are loaded.
    if weight_path is not None:
        load_specific_weights(transfer_model, weight_path,
                              exclude_names=['softmax'],
                              extend_embedding=extend_embedding)
    return transfer_model
def deepmoji_architecture(nb_classes, nb_tokens, maxlen, feature_output=False, embed_dropout_rate=0, final_dropout_rate=0, embed_l2=1E-6, return_attention=False):
    """
    Returns the DeepMoji architecture uninitialized and
    without using the pretrained model weights.

    Architecture: embedding (256 dims, tanh-bounded) -> two stacked
    bidirectional LSTMs (512 units each direction) -> attention over the
    concatenation of both LSTM outputs and the embedding (skip-connection)
    -> optional softmax/sigmoid classification head.

    # Arguments:
        nb_classes: Number of classes in the dataset. Only used when
            feature_output=False.
        nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).
        maxlen: Maximum length of a sentence (given in tokens).
        feature_output: If True the model returns the penultimate
            feature vector rather than Softmax probabilities
            (defaults to False).
        embed_dropout_rate: Dropout rate for the embedding layer.
        final_dropout_rate: Dropout rate for the final Softmax layer.
        embed_l2: L2 regularization for the embedding layer.
        return_attention: If True, the attention weights for each timestep
            are appended to the model outputs.
    # Returns:
        Model with the given parameters.
    """
    # define embedding layer that turns word tokens into vectors
    # an activation function is used to bound the values of the embedding
    model_input = Input(shape=(maxlen,), dtype='int32')
    embed_reg = L1L2(l2=embed_l2) if embed_l2 != 0 else None
    embed = Embedding(input_dim=nb_tokens,
                      output_dim=256,
                      mask_zero=True,
                      input_length=maxlen,
                      embeddings_regularizer=embed_reg,
                      name='embedding')
    x = embed(model_input)
    # tanh bounds the embedding values to [-1, 1]
    x = Activation('tanh')(x)

    # entire embedding channels are dropped out instead of the
    # normal Keras embedding dropout, which drops all channels for entire words
    # many of the datasets contain so few words that losing one or more words can alter the emotions completely
    if embed_dropout_rate != 0:
        embed_drop = SpatialDropout1D(embed_dropout_rate, name='embed_drop')
        x = embed_drop(x)

    # skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
    # ordering of the way the merge is done is important for consistency with the pretrained model
    lstm_0_output = Bidirectional(LSTM(512, return_sequences=True), name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(LSTM(512, return_sequences=True), name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x])

    # if return_attention is True in AttentionWeightedAverage, an additional tensor
    # representing the weight at each timestep is returned
    weights = None
    x = AttentionWeightedAverage(name='attlayer', return_attention=return_attention)(x)
    if return_attention:
        x, weights = x

    if not feature_output:
        # output class probabilities
        if final_dropout_rate != 0:
            x = Dropout(final_dropout_rate)(x)

        # binary problems use a single sigmoid unit; multi-class uses softmax
        if nb_classes > 2:
            outputs = [Dense(nb_classes, activation='softmax', name='softmax')(x)]
        else:
            outputs = [Dense(1, activation='sigmoid', name='softmax')(x)]
    else:
        # output penultimate feature vector
        outputs = [x]

    if return_attention:
        # add the attention weights to the outputs if required
        outputs.append(weights)

    return Model(inputs=[model_input], outputs=outputs, name="DeepMoji")
def load_specific_weights(model, weight_path, exclude_names=None, extend_embedding=0, verbose=True):
    """ Loads model weights from the given file path, excluding any
    given layers.

    # Arguments:
        model: Model whose weights should be loaded.
        weight_path: Path to file containing model weights.
        exclude_names: List of layer names whose weights should not be
            loaded. Defaults to no exclusions.
        extend_embedding: Number of new words being added to vocabulary.
        verbose: Verbosity flag.

    # Raises:
        ValueError if the file at weight_path does not exist, if the
        embedding layer is both excluded and asked to be extended, or if
        the weights file contains a layer missing from the model.
    """
    # Avoid the mutable-default-argument pitfall by normalizing here.
    if exclude_names is None:
        exclude_names = []

    if not exists(weight_path):
        raise ValueError('ERROR (load_weights): The weights file at {} does '
                         'not exist. Refer to the README for instructions.'
                         .format(weight_path))

    if extend_embedding and 'embedding' in exclude_names:
        raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '
                         'without loading the embedding weights.')

    # Copy only weights from the temporary model that are wanted
    # for the specific task (e.g. the Softmax is often ignored)
    layer_weights = get_weights_from_hdf5(weight_path)
    for l_name, _weight_names, weight_values in layer_weights:
        if l_name in exclude_names:
            if verbose:
                print('Ignoring weights for {}'.format(l_name))
            continue

        try:
            model_l = model.get_layer(name=l_name)
        except ValueError:
            raise ValueError("Weights had layer {},".format(l_name) +
                             " but could not find this layer in model.")

        if verbose:
            print('Loading weights for {}'.format(l_name))

        # extend embedding layer to allow new randomly initialized words
        # if requested. Otherwise, just load the weights for the layer.
        if type(model_l) is Embedding and extend_embedding > 0:
            comb_weights = append_to_embedding(weight_values,
                                               model_l.get_weights())
            model_l.set_weights(comb_weights)
            if verbose:
                print('Extended vocabulary for embedding layer ' +
                      'from {} to {} tokens.'.format(
                          NB_TOKENS, NB_TOKENS + extend_embedding))
        else:
            model_l.set_weights(weight_values)
def append_to_embedding(pretrain_weights, random_init_weights):
    """ Uses pretrained weights for the tokens already in the vocabulary.
    Remaining weights will be left with the random initialization. """
    # Work on a copy so the caller's pretrained weights stay untouched.
    pretrained = deepcopy(pretrain_weights)

    # Keras layer weights come wrapped in a list; unwrap if needed.
    if type(pretrained) == list:
        pretrained = pretrained[0]
    target = random_init_weights
    if type(target) == list:
        target = target[0]

    # Overwrite the leading rows (the existing vocabulary) with the
    # pretrained vectors; trailing rows keep their random initialization.
    nb_pretrained_tokens = np.shape(pretrained)[0]
    target[:nb_pretrained_tokens] = pretrained

    # must be returned as a list to be properly inserted into Keras model
    return [target]
def get_weights_from_hdf5(filepath):
    """ Loads the weights from a saved Keras model into numpy arrays.
    The weights are saved using Keras 2.0 so we don't need all the
    conversion functionality for handling old weights.

    # Arguments:
        filepath: Path to the HDF5 weights file.

    # Returns:
        A list of [layer_name, weight_names, weight_values] entries, one
        per layer that actually has weights; weight-less layers are skipped.
    """
    with h5py.File(filepath, mode='r') as f:
        # NOTE(review): assumes HDF5 attributes come back as byte strings
        # (h5py < 3); newer h5py may already return str, which would make
        # .decode() fail -- confirm before upgrading h5py.
        layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
        layer_weights = []
        for k, l_name in enumerate(layer_names):
            g = f[l_name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
            # [:] materializes each dataset into a numpy array while the
            # file handle is still open.
            weight_values = [g[weight_name][:] for weight_name in weight_names]
            if len(weight_values):
                layer_weights.append([l_name, weight_names, weight_values])
        return layer_weights
| 11,369 | 41.58427 | 162 | py |
DeepMoji | DeepMoji-master/deepmoji/attlayer.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import sys
from os.path import dirname
sys.path.append(dirname(dirname(__file__)))
from keras import initializers
from keras.engine import InputSpec, Layer
from keras import backend as K
class AttentionWeightedAverage(Layer):
    """
    Computes a weighted average of the different channels across timesteps.
    Uses 1 parameter pr. channel to compute the attention value for a single timestep.
    """

    def __init__(self, return_attention=False, **kwargs):
        # Single uniform-initialized weight vector drives the attention.
        self.init = initializers.get('uniform')
        self.supports_masking = True
        self.return_attention = return_attention
        super(AttentionWeightedAverage, self).__init__(** kwargs)

    def get_config(self):
        """Return the layer config so the model can be saved/reloaded."""
        config = {
            'return_attention': self.return_attention,
        }
        base_config = super(AttentionWeightedAverage, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def build(self, input_shape):
        """Create the (channels, 1) attention weight matrix W."""
        # Expects 3-D input: (batch, timesteps, channels).
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3

        self.W = self.add_weight(shape=(input_shape[2], 1),
                                 name='{}_W'.format(self.name),
                                 initializer=self.init)
        self.trainable_weights = [self.W]
        super(AttentionWeightedAverage, self).build(input_shape)

    def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid issue with Tensorflow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        # epsilon guards against division by zero when all timesteps are masked
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result

    def get_output_shape_for(self, input_shape):
        # Keras 1 API name; delegate to the Keras 2 equivalent.
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        """Timestep axis is collapsed; channel count is preserved."""
        output_len = input_shape[2]
        if self.return_attention:
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)

    def compute_mask(self, input, input_mask=None):
        # The weighted average removes the timestep dimension, so any
        # incoming mask must not propagate to downstream layers.
        if isinstance(input_mask, list):
            return [None] * len(input_mask)
        else:
            return None
| 2,792 | 35.75 | 86 | py |
DeepMoji | DeepMoji-master/deepmoji/finetuning.py | """ Finetuning functions for doing transfer learning to new datasets.
"""
from __future__ import print_function
import sys
import uuid
from time import sleep
import h5py
import math
import pickle
import numpy as np
from keras.layers.wrappers import Bidirectional, TimeDistributed
from sklearn.metrics import f1_score
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from keras.models import model_from_json
from global_variables import (
FINETUNING_METHODS,
FINETUNING_METRICS,
WEIGHTS_DIR)
from tokenizer import tokenize
from sentence_tokenizer import SentenceTokenizer
from attlayer import AttentionWeightedAverage
def load_benchmark(path, vocab, extend_with=0):
    """ Loads the given benchmark dataset.

    Tokenizes the texts using the provided vocabulary, extending it with
    words from the training dataset if extend_with > 0. Splits them into
    three lists: training, validation and testing (in that order).

    Also calculates the maximum length of the texts and the
    suggested batch_size.

    # Arguments:
        path: Path to the dataset to be loaded.
        vocab: Vocabulary to be used for tokenizing texts.
        extend_with: If > 0, the vocabulary will be extended with up to
            extend_with tokens from the training set before tokenizing.

    # Returns:
        A dictionary with the following fields:
            texts: List of three lists, containing tokenized inputs for
                training, validation and testing (in that order).
            labels: List of three lists, containing labels for training,
                validation and testing (in that order).
            added: Number of tokens added to the vocabulary.
            batch_size: Batch size.
            maxlen: Maximum length of an input.
    """
    # Pre-processing dataset
    # Bug fix: pickle files must be opened in binary mode. Text mode only
    # happened to work on Python 2 / Unix and corrupts data elsewhere.
    with open(path, 'rb') as dataset:
        data = pickle.load(dataset)

    # Decode data
    try:
        texts = [unicode(x) for x in data['texts']]
    except UnicodeDecodeError:
        texts = [x.decode('utf-8') for x in data['texts']]

    # Extract labels
    labels = [x['label'] for x in data['info']]

    batch_size, maxlen = calculate_batchsize_maxlen(texts)

    st = SentenceTokenizer(vocab, maxlen)

    # Split up dataset. Extend the existing vocabulary with up to extend_with
    # tokens from the training dataset.
    texts, labels, added = st.split_train_val_test(texts,
                                                   labels,
                                                   [data['train_ind'],
                                                    data['val_ind'],
                                                    data['test_ind']],
                                                   extend_with=extend_with)
    return {'texts': texts,
            'labels': labels,
            'added': added,
            'batch_size': batch_size,
            'maxlen': maxlen}
def calculate_batchsize_maxlen(texts):
    """ Calculates the maximum length in the provided texts and a suitable
    batch size. Rounds up maxlen to the nearest multiple of ten.

    # Arguments:
        texts: List of inputs.

    # Returns:
        Batch size,
        max length
    """
    def roundup(x):
        return int(math.ceil(x / 10.0)) * 10

    # Use the 80th percentile of token counts (rounded up to a multiple of
    # ten) instead of the true maximum, so that a few very long outliers do
    # not blow up the sequence length.
    token_counts = [len(tokenize(text)) for text in texts]
    maxlen = roundup(np.percentile(token_counts, 80.0))

    # Longer sequences require more GPU memory, so shrink the batch size
    # accordingly to prevent GPU overflow.
    batch_size = 250 if maxlen <= 100 else 50
    return batch_size, maxlen
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.
        verbose: Verbosity flag; callback output only at level >= 2.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    show_callback_output = (verbose >= 2)

    # Keep only the best weights (lowest validation loss) on disk.
    best_weight_saver = ModelCheckpoint(monitor='val_loss',
                                        filepath=checkpoint_path,
                                        save_best_only=True,
                                        verbose=show_callback_output)

    # Halt training once validation loss stops improving.
    early_stopper = EarlyStopping(monitor='val_loss', patience=patience,
                                  verbose=show_callback_output)
    return [best_weight_saver, early_stopper]
def freeze_layers(model, unfrozen_types=None, unfrozen_keyword=None):
    """ Freezes all layers in the given model, except for ones that are
    explicitly specified to not be frozen.

    # Arguments:
        model: Model whose layers should be modified.
        unfrozen_types: List of layer types which shouldn't be frozen.
        unfrozen_keyword: Name keywords of layers that shouldn't be frozen.

    # Returns:
        Model with the selected layers frozen.
    """
    # Avoid the mutable-default-argument pitfall by normalizing here.
    if unfrozen_types is None:
        unfrozen_types = []

    for l in model.layers:
        # Layers without trainable weights (e.g. dropout) need no change.
        if len(l.trainable_weights):
            trainable = (type(l) in unfrozen_types or
                         (unfrozen_keyword is not None and unfrozen_keyword in l.name))
            change_trainable(l, trainable, verbose=False)
    return model
def change_trainable(layer, trainable, verbose=False):
    """ Helper method that fixes some of Keras' issues with wrappers and
    trainability. Freezes or unfreezes a given layer.

    # Arguments:
        layer: Layer to be modified.
        trainable: Whether the layer should be frozen or unfrozen.
        verbose: Verbosity flag.
    """
    layer.trainable = trainable

    # Wrapped layers do not automatically inherit the wrapper's trainable
    # flag in older Keras versions, so it is set explicitly here.
    if type(layer) == Bidirectional:
        layer.backward_layer.trainable = trainable
        layer.forward_layer.trainable = trainable

    if type(layer) == TimeDistributed:
        # Bug fix: TimeDistributed exposes its single wrapped layer as
        # `.layer`; it has no `.backward_layer` attribute (that only exists
        # on Bidirectional), so the original code raised AttributeError.
        layer.layer.trainable = trainable

    if verbose:
        action = 'Unfroze' if trainable else 'Froze'
        print("{} {}".format(action, layer.name))
def find_f1_threshold(y_val, y_pred_val, y_test, y_pred_test,
                      average='binary'):
    """ Choose a decision threshold for F1 based on the validation dataset
    (see https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4442797/
    for details on why to find another threshold than simply 0.5)

    # Arguments:
        y_val: Outputs of the validation dataset.
        y_pred_val: Predicted outputs of the validation dataset.
        y_test: Outputs of the testing dataset.
        y_pred_test: Predicted outputs of the testing dataset.
        average: Averaging strategy forwarded to sklearn's f1_score.

    # Returns:
        F1 score for the given data and
        the corresponding F1 threshold
    """
    # Sweep candidate thresholds in (0, 0.5) and keep whichever maximizes
    # F1 on the validation split.
    candidate_thresholds = np.arange(0.01, 0.5, step=0.01)
    val_f1_scores = [f1_score(y_val, (y_pred_val > t), average=average)
                     for t in candidate_thresholds]
    best_t = candidate_thresholds[np.argmax(val_f1_scores)]

    # Report the F1 achieved on the held-out test split at that threshold.
    f1_test = f1_score(y_test, (y_pred_test > best_t), average=average)
    return f1_test, best_t
def relabel(y, current_label_nr, nb_classes):
    """ Makes a binary classification for a specific class in a
    multi-class dataset.

    # Arguments:
        y: Outputs to be relabelled.
        current_label_nr: Current label number.
        nb_classes: Total number of classes.

    # Returns:
        Relabelled outputs of a given multi-class dataset into a binary
        classification dataset.
    """
    # A 1-D label vector on a two-class problem is already binary.
    if nb_classes == 2 and y.ndim == 1:
        return y

    # 1 where the selected class is active, 0 everywhere else.
    selected_column = y[:, current_label_nr]
    return (selected_column == 1).astype(float)
def sampling_generator(X_in, y_in, batch_size, epoch_size=25000,
                       upsample=False, seed=42):
    """ Returns a generator that enables larger epochs on small datasets and
    has upsampling functionality.

    # Arguments:
        X_in: Inputs of the given dataset.
        y_in: Outputs of the given dataset.
        batch_size: Batch size.
        epoch_size: Number of samples in an epoch.
        upsample: Whether upsampling should be done. This flag should only be
            set on binary class problems.
        seed: Random number generator seed.

    # Returns:
        Sample generator.
    """
    np.random.seed(seed)

    if upsample:
        # Upsampling is only meaningful for binary (1-D) labels.
        assert len(y_in.shape) == 1
        negatives = np.where(y_in == 0)[0]
        positives = np.where(y_in == 1)[0]
        assert epoch_size % 2 == 0
        samples_per_class = int(epoch_size / 2)
    else:
        all_indices = range(len(X_in))

    nb_batches = int(epoch_size / batch_size)

    # Keep producing epochs until training halts.
    while True:
        if not upsample:
            # Draw a bootstrap sample of the whole dataset.
            chosen = np.random.choice(all_indices, epoch_size, replace=True)
            X, y = X_in[chosen], y_in[chosen]
        else:
            # Draw equally many positives and negatives, then shuffle so
            # the labels are not grouped (all negative then positive).
            neg_sample = np.random.choice(negatives, samples_per_class, replace=True)
            pos_sample = np.random.choice(positives, samples_per_class, replace=True)
            X = np.concatenate((X_in[neg_sample], X_in[pos_sample]), axis=0)
            y = np.concatenate((y_in[neg_sample], y_in[pos_sample]), axis=0)
            shuffled = np.random.permutation(len(X))
            X, y = X[shuffled], y[shuffled]

            # Sanity check: the epoch should be roughly class-balanced.
            label_dist = np.mean(y)
            assert label_dist > 0.45
            assert label_dist < 0.55

        # Hand the epoch off one batch at a time.
        for batch_nr in range(nb_batches):
            start = batch_nr * batch_size
            end = min(start + batch_size, epoch_size)
            yield (X[start:end], y[start:end])
def finetune(model, texts, labels, nb_classes, batch_size, method,
             metric='acc', epoch_size=5000, nb_epochs=1000,
             error_checking=True, verbose=1):
    """ Compiles and finetunes the given model.

    # Arguments:
        model: Model to be finetuned
        texts: List of three lists, containing tokenized inputs for training,
            validation and testing (in that order).
        labels: List of three lists, containing labels for training,
            validation and testing (in that order).
        nb_classes: Number of classes in the dataset.
        batch_size: Batch size.
        method: Finetuning method to be used. For available methods, see
            FINETUNING_METHODS in global_variables.py.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs. Doesn't matter much as early stopping is used.
        metric: Evaluation metric to be used. For available metrics, see
            FINETUNING_METRICS in global_variables.py.
        error_checking: If set to True, warnings will be printed when the label
            list has the wrong dimensions.
        verbose: Verbosity flag.

    # Returns:
        Model after finetuning,
        score after finetuning using the provided metric.

    # Raises:
        ValueError if method or metric is not a recognized option.
    """
    # Validate method/metric before doing any work.
    if method not in FINETUNING_METHODS:
        raise ValueError('ERROR (finetune): Invalid method parameter. '
                         'Available options: {}'.format(FINETUNING_METHODS))
    if metric not in FINETUNING_METRICS:
        raise ValueError('ERROR (finetune): Invalid metric parameter. '
                         'Available options: {}'.format(FINETUNING_METRICS))

    (X_train, y_train) = (texts[0], labels[0])
    (X_val, y_val) = (texts[1], labels[1])
    (X_test, y_test) = (texts[2], labels[2])

    # Unique checkpoint filename so concurrent runs do not clash.
    checkpoint_path = '{}/deepmoji-checkpoint-{}.hdf5' \
        .format(WEIGHTS_DIR, str(uuid.uuid4()))

    # Check dimension of labels
    if error_checking:
        for ls in [y_train, y_val, y_test]:
            if not ls.ndim == 1:
                print('WARNING (finetune): The dimension of the '
                      'provided label list does not match the expected '
                      'value. When using the \'{}\' metric, the labels '
                      'should be a 1-dimensional array. '
                      'Input shape was {}'.format(metric, ls.shape))
                break

    # A higher learning rate is safe when only the new layers are trained.
    if method in ['last', 'new']:
        lr = 0.001
    elif method in ['full', 'chain-thaw']:
        lr = 0.0001

    loss = 'binary_crossentropy' if nb_classes <= 2 \
        else 'categorical_crossentropy'

    # Freeze layers if using last
    if method == 'last':
        model = freeze_layers(model, unfrozen_keyword='softmax')

    # Compile model, for chain-thaw we compile it later (after freezing)
    if method != 'chain-thaw':
        adam = Adam(clipnorm=1, lr=lr)
        model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])

    # Training
    if verbose:
        print('Method: {}'.format(method))
        print('Metric: {}'.format(metric))
        print('Classes: {}'.format(nb_classes))

    if method == 'chain-thaw':
        result = chain_thaw(model, nb_classes=nb_classes,
                            train=(X_train, y_train),
                            val=(X_val, y_val),
                            test=(X_test, y_test),
                            batch_size=batch_size, loss=loss,
                            epoch_size=epoch_size,
                            nb_epochs=nb_epochs,
                            checkpoint_weight_path=checkpoint_path,
                            evaluate=metric, verbose=verbose)
    else:
        result = tune_trainable(model, nb_classes=nb_classes,
                                train=(X_train, y_train),
                                val=(X_val, y_val),
                                test=(X_test, y_test),
                                epoch_size=epoch_size,
                                nb_epochs=nb_epochs,
                                batch_size=batch_size,
                                checkpoint_weight_path=checkpoint_path,
                                evaluate=metric, verbose=verbose)
    return model, result
def tune_trainable(model, nb_classes, train, val, test, epoch_size,
                   nb_epochs, batch_size, checkpoint_weight_path,
                   patience=5, evaluate='acc', verbose=1):
    """ Finetunes the given model using the accuracy measure.

    # Arguments:
        model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        train: Training data, given as a tuple of (inputs, outputs)
        val: Validation data, given as a tuple of (inputs, outputs)
        test: Testing data, given as a tuple of (inputs, outputs)
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        batch_size: Batch size.
        checkpoint_weight_path: Filepath where weights will be checkpointed to
            during training. This file will be rewritten by the function.
        patience: Patience for callback methods.
        evaluate: Evaluation method to use. Can be 'acc' or 'weighted_f1'.
        verbose: Verbosity flag.

    # Returns:
        Accuracy of the trained model, ONLY if 'evaluate' is set.
    """
    # Unpack args
    X_train, y_train = train
    X_val, y_val = val
    X_test, y_test = test
    # Multi-class labels must be one-hot encoded for categorical loss.
    if nb_classes > 2:
        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)
        y_test = to_categorical(y_test)

    if verbose:
        print("Trainable weights: {}".format(model.trainable_weights))
        print("Training..")

    # Use sample generator for fixed-size epoch
    train_gen = sampling_generator(X_train, y_train,
                                   batch_size, upsample=False)
    callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose)
    steps = int(epoch_size / batch_size)
    # NOTE(review): validation_data is an in-memory tuple here, so
    # validation_steps is presumably ignored by Keras -- confirm.
    model.fit_generator(train_gen, steps_per_epoch=steps,
                        epochs=nb_epochs,
                        validation_data=(X_val, y_val),
                        validation_steps=steps,
                        callbacks=callbacks, verbose=(verbose >= 2))

    # Reload the best weights found to avoid overfitting
    # Wait a bit to allow proper closing of weights file
    sleep(1)
    model.load_weights(checkpoint_weight_path, by_name=False)
    if verbose >= 2:
        print("Loaded weights from {}".format(checkpoint_weight_path))

    # Implicitly returns None for any other value of `evaluate`.
    if evaluate == 'acc':
        return evaluate_using_acc(model, X_test, y_test, batch_size=batch_size)
    elif evaluate == 'weighted_f1':
        return evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
                                          batch_size=batch_size)
def evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
                               batch_size):
    """ Evaluation function using macro weighted F1 score.

    # Arguments:
        model: Model to be evaluated.
        X_test: Inputs of the testing set.
        y_test: Outputs of the testing set.
        X_val: Inputs of the validation set.
        y_val: Outputs of the validation set.
        batch_size: Batch size.

    # Returns:
        Weighted F1 score of the given model.
    """
    y_pred_test = np.array(model.predict(X_test, batch_size=batch_size))
    y_pred_val = np.array(model.predict(X_val, batch_size=batch_size))

    # Bug fix: sklearn's f1_score expects average='weighted';
    # 'weighted_f1' is not a valid option and raised a ValueError.
    f1_test, _ = find_f1_threshold(y_val, y_pred_val, y_test, y_pred_test,
                                   average='weighted')
    return f1_test
def evaluate_using_acc(model, X_test, y_test, batch_size):
    """ Evaluation function using accuracy.

    # Arguments:
        model: Model to be evaluated.
        X_test: Inputs of the testing set.
        y_test: Outputs of the testing set.
        batch_size: Batch size.

    # Returns:
        Accuracy of the given model.
    """
    # model.evaluate returns [loss, accuracy] given the compile metrics;
    # only the accuracy is of interest here.
    scores = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
    return scores[1]
def chain_thaw(model, nb_classes, train, val, test, batch_size,
               loss, epoch_size, nb_epochs, checkpoint_weight_path,
               patience=5,
               initial_lr=0.001, next_lr=0.0001, seed=None,
               verbose=1, evaluate='acc'):
    """ Finetunes given model using chain-thaw and evaluates using accuracy.

    # Arguments:
        model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        train: Training data, given as a tuple of (inputs, outputs)
        val: Validation data, given as a tuple of (inputs, outputs)
        test: Testing data, given as a tuple of (inputs, outputs)
        batch_size: Batch size.
        loss: Loss function to be used during training.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        checkpoint_weight_path: Filepath where weights will be checkpointed to
            during training. This file will be rewritten by the function.
        patience: Patience for callback methods.
        initial_lr: Initial learning rate. Will only be used for the first
            training step (i.e. the softmax layer)
        next_lr: Learning rate for every subsequent step.
        seed: Random number generator seed.
        verbose: Verbosity flag.
        evaluate: Evaluation method to use. Can be 'acc' or 'weighted_f1'.

    # Returns:
        Accuracy of the finetuned model.
    """
    # Unpack args
    X_train, y_train = train
    X_val, y_val = val
    X_test, y_test = test
    # Multi-class labels must be one-hot encoded for categorical loss.
    if nb_classes > 2:
        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)
        y_test = to_categorical(y_test)

    if verbose:
        print('Training..')

    # Use sample generator for fixed-size epoch
    train_gen = sampling_generator(X_train, y_train, batch_size,
                                   upsample=False, seed=seed)
    callbacks = finetuning_callbacks(checkpoint_weight_path, patience, verbose)

    # Train using chain-thaw
    # Bug fix: initial_lr and next_lr were previously not forwarded, so
    # caller-provided learning rates were silently ignored in favour of
    # train_by_chain_thaw's defaults.
    train_by_chain_thaw(model=model, train_gen=train_gen,
                        val_data=(X_val, y_val), loss=loss, callbacks=callbacks,
                        epoch_size=epoch_size, nb_epochs=nb_epochs,
                        checkpoint_weight_path=checkpoint_weight_path,
                        initial_lr=initial_lr, next_lr=next_lr,
                        batch_size=batch_size, verbose=verbose)

    # Implicitly returns None for any other value of `evaluate`.
    if evaluate == 'acc':
        return evaluate_using_acc(model, X_test, y_test, batch_size=batch_size)
    elif evaluate == 'weighted_f1':
        return evaluate_using_weighted_f1(model, X_test, y_test, X_val, y_val,
                                          batch_size=batch_size)
def train_by_chain_thaw(model, train_gen, val_data, loss, callbacks, epoch_size,
                        nb_epochs, checkpoint_weight_path, batch_size,
                        initial_lr=0.001, next_lr=0.0001, verbose=1):
    """ Finetunes model using the chain-thaw method.
    This is done as follows:
    1) Freeze every layer except the last (softmax) layer and train it.
    2) Freeze every layer except the first layer and train it.
    3) Freeze every layer except the second etc., until the second last layer.
    4) Unfreeze all layers and train entire model.

    # Arguments:
        model: Model to be trained.
        train_gen: Training sample generator.
        val_data: Validation data.
        loss: Loss function to be used.
        callbacks: Training callbacks to be used.
        epoch_size: Number of samples in an epoch.
        nb_epochs: Number of epochs.
        checkpoint_weight_path: Where weight checkpoints should be saved.
        batch_size: Batch size.
        initial_lr: Initial learning rate. Will only be used for the first
            training step (i.e. the softmax layer)
        next_lr: Learning rate for every subsequent step.
        verbose: Verbosity flag.
    """
    # Get trainable layers
    layers = [layer for layer in model.layers
              if len(layer.trainable_weights)]

    # Bring last layer to front
    layers.insert(0, layers.pop(len(layers) - 1))

    # Add None to the end to signify finetuning all layers
    layers.append(None)

    lr = None
    # Finetune each layer one by one and finetune all of them at once
    # at the end
    for layer in layers:
        # First pass uses initial_lr; every later pass switches to next_lr.
        if lr is None:
            lr = initial_lr
        elif lr == initial_lr:
            lr = next_lr
        adam = Adam(clipnorm=1, lr=lr)

        # Freeze all except current layer
        for _layer in layers:
            if _layer is not None:
                # layer is None means the final "train everything" pass.
                trainable = _layer == layer or layer is None
                change_trainable(_layer, trainable=trainable, verbose=False)

        # Verify we froze the right layers
        for _layer in model.layers:
            if _layer is not None and len(_layer.trainable_weights):
                assert _layer.trainable == (_layer == layer) or layer is None

        # NOTE(review): `cache` is not a standard Keras Model attribute --
        # presumably consumed by a patched build elsewhere; confirm.
        model.cache = False
        # Recompile so the new trainable flags and learning rate take effect.
        model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
        model.cache = True

        if verbose:
            if layer is None:
                print('Finetuning all layers')
            else:
                print('Finetuning {}'.format(layer.name))

        steps = int(epoch_size / batch_size)
        model.fit_generator(train_gen, steps_per_epoch=steps,
                            epochs=nb_epochs, validation_data=val_data,
                            callbacks=callbacks, verbose=(verbose >= 2))

        # Reload the best weights found to avoid overfitting
        # Wait a bit to allow proper closing of weights file
        sleep(1)
        model.load_weights(checkpoint_weight_path, by_name=False)
        if verbose >= 2:
            print("Loaded weights from {}".format(checkpoint_weight_path))
| 23,552 | 36.267405 | 87 | py |
DeepMoji | DeepMoji-master/examples/imdb_from_scratch.py | """Trains the DeepMoji architecture on the IMDB sentiment classification task.
This is a simple example of using the architecture without the pretrained model.
The architecture is designed for transfer learning - it should normally
be used with the pretrained model for optimal performance.
"""
from __future__ import print_function
import example_helper
import numpy as np
from keras.preprocessing import sequence
from keras.datasets import imdb
from deepmoji.model_def import deepmoji_architecture
# Seed for reproducibility
np.random.seed(1337)

# Hyperparameters: vocabulary size, sequence length and batch size.
nb_tokens = 20000
maxlen = 80
batch_size = 32

print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=nb_tokens)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print('Pad sequences (samples x time)')
# Pad/truncate every review to exactly maxlen tokens.
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
# Binary sentiment task: nb_classes=2 yields a single sigmoid output.
model = deepmoji_architecture(nb_classes=2, nb_tokens=nb_tokens, maxlen=maxlen)
model.summary()

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
# NOTE(review): the test set doubles as validation data here, so the
# reported validation accuracy is not an unbiased estimate.
model.fit(X_train, y_train, batch_size=batch_size, epochs=15,
          validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
| 1,492 | 32.177778 | 83 | py |
DeepMoji | DeepMoji-master/scripts/analyze_all_results.py | from __future__ import print_function
# allow us to import the codebase/keras directory
import sys
import glob
import numpy as np
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
DATASETS = ['SE0714', 'Olympic', 'PsychExp', 'SS-Twitter', 'SS-Youtube',
'SCv1', 'SV2-GEN'] # 'SE1604' excluded due to Twitter's ToS
def get_results(dset):
    """Aggregate and print benchmark scores for one dataset.

    Globs every ``<dset>_<METHOD>_*_results.txt`` file under ``results/``,
    reads the score from the first line of each (expected format
    ``label: <float>``), and prints the mean / max / min / standard
    deviation over all runs.

    Args:
        dset: dataset name prefix of the result files, e.g. 'SE0714'.

    Raises:
        AssertionError: if no result files are found for the dataset.
    """
    METHOD = 'last'
    RESULTS_DIR = 'results/'
    RESULT_PATHS = glob.glob('{}/{}_{}_*_results.txt'.format(RESULTS_DIR, dset, METHOD))
    assert len(RESULT_PATHS), 'No result files found for {}'.format(dset)

    scores = []
    for path in RESULT_PATHS:
        with open(path) as f:
            # First line looks like 'acc: 0.83'; take everything after the colon.
            score = f.readline().split(':')[1]
        scores.append(float(score))

    average = np.mean(scores)
    maximum = max(scores)
    minimum = min(scores)
    std = np.std(scores)

    print('Dataset: {}'.format(dset))
    print('Method: {}'.format(METHOD))
    print('Number of results: {}'.format(len(scores)))
    print('--------------------------')
    print('Average: {}'.format(average))
    print('Maximum: {}'.format(maximum))
    print('Minimum: {}'.format(minimum))
    # Fixed typo in the original output string ('deviaton').
    print('Standard deviation: {}'.format(std))
# Print aggregate stats for every benchmark dataset in turn.
for dset in DATASETS:
    get_results(dset)
| 1,221 | 27.418605 | 88 | py |
separator | separator-main/scripts/generate_3way_wikianswers.py | import jsonlines, os, json
import numpy as np
from flair.models import SequenceTagger
from flair.data import Sentence
from collections import defaultdict, Counter
from tqdm import tqdm
from copy import deepcopy
import torch
# predictor = Predictor.from_archive(load_archive("https://storage.googleapis.com/allennlp-public-models/elmo-constituency-parser-2020.02.10.tar.gz"))
import argparse
parser = argparse.ArgumentParser(
description="WikiAnswers to 3way script",
)
parser.add_argument(
"--data_dir", type=str, metavar="PATH", default='./data/', help="Path to data folder"
)
parser.add_argument(
"--dataset", type=str, metavar="PATH", default='wa-triples', help="Source dataset"
)
parser.add_argument("--debug", action="store_true", help="Debug mode")
parser.add_argument("--pos_templates", action="store_true", help="Use POS tags for templating")
parser.add_argument("--constit_templates", action="store_true", help="Use constituency parses for templating")
parser.add_argument("--use_diff_templ_for_sem", action="store_true", help="Force different templates between tgt and sem input")
parser.add_argument("--use_stop_class", action="store_true", help="Convert stopwords to a STOP class for templating")
parser.add_argument("--extended_stopwords", action="store_true", help="Use an extended stopwords list")
parser.add_argument("--no_stopwords", action="store_true", help="Don't use any stopwords")
parser.add_argument("--single_vocab", action="store_true", help="Use one vocab and ignore tags")
parser.add_argument("--resample_cluster", action="store_true", help="Generate the full sample size from each cluster, even if the cluster is smaller")
parser.add_argument("--uniform_sampling", action="store_true", help="Sample from the vocab unformly rather than weighted by occurrence")
parser.add_argument("--real_exemplars", action="store_true", help="Use exemplars from the dataset if possible")
parser.add_argument(
"--sample_size", type=int, metavar="N", default=10, help="Number of samples per cluster"
)
parser.add_argument(
"--rate", type=float, metavar="RATE", default=0.5, help="Template noising rate"
)
parser.add_argument(
"--template_dropout", type=float, metavar="TEMPL_DROP", default=0.0, help="Prob of using an arbitrarily different template from the same cluster"
)
parser.add_argument(
"--seed", type=int, metavar="SEED", default=123, help="Random seed"
)
args = parser.parse_args()
DEBUG = args.debug
np.random.seed(args.seed)
if args.pos_templates:
tagger = SequenceTagger.load('pos')
elif args.constit_templates:
from allennlp.predictors.predictor import Predictor
import allennlp_models.structured_prediction
from allennlp.models.archival import load_archive
predictor = Predictor.from_archive(
load_archive("https://storage.googleapis.com/allennlp-public-models/elmo-constituency-parser-2020.02.10.tar.gz",
cuda_device=torch.cuda.current_device()),
)
else:
tagger = SequenceTagger.load('chunk')
splits = ['train','dev','test']
SAMPLES_PER_CLUSTER = args.sample_size
KEEP_FRACTION = 1 - args.rate
rate_str = int(100 * args.rate)
if args.dataset == 'wa-triples':
dataset = 'wikianswers-triples'
else:
dataset = args.dataset
modifiers = "__".join(dataset.split('/'))
cache_key = 'cache'
if args.debug:
modifiers += '-debug'
if args.pos_templates:
modifiers += '-pos'
elif args.constit_templates:
modifiers += '-constit'
else:
modifiers += '-chunk'
if args.use_stop_class:
modifiers += '-stopclass'
if args.no_stopwords:
modifiers += '-nostop'
if args.extended_stopwords:
modifiers += '-extendstop'
if args.single_vocab:
modifiers += '-combinedvocab'
cache_key += modifiers
if args.real_exemplars:
modifiers += '-realexemplars'
if args.resample_cluster:
modifiers += '-resample'
if args.uniform_sampling:
modifiers += '-uniform'
if args.seed != 123:
seed = args.seed
modifiers += f'-seed{seed}'
if args.template_dropout > 0:
dropstr = str(int(args.template_dropout * 100))
# print(dropstr)
modifiers += f"-drop{dropstr}"
if not args.use_diff_templ_for_sem:
modifiers += '-unforced'
name_slug = f"{modifiers}-N{SAMPLES_PER_CLUSTER}-R{rate_str}"
os.makedirs(os.path.join(args.data_dir, f'{name_slug}/'), exist_ok=True)
stopwords = []
tags_to_preserve = []
if not args.no_stopwords:
stopwords += ['who','what','when','where', 'why','how', 'many', 'which']
if args.extended_stopwords:
tags_to_preserve += ['.','WP','IN','$','``',"''",'DT','PRP','SYM',':']
stopwords += ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now']
# tags_to_preserve = ['.','WP','IN','$','``',"''",'DT','PRP','SYM',':']
def get_chunk_parse(sent, use_stop_class=False):
    """Collapse a flair-tagged Sentence into a chunk-level template.

    Adjacent non-stopword tokens that share the same chunk/POS tag are merged
    into a single span. Returns two parallel lists ``(parse_filtered,
    toks_filtered)``: each template entry is either a tag (for merged content
    spans) or the literal token (for stopwords and untagged tokens).

    Args:
        sent: a flair Sentence that has already been run through the tagger,
            so every token carries exactly one label.
        use_stop_class: if True, stopwords appear in the template as the
            single class 'STOP' instead of verbatim.
    """
    # One tag string per token, e.g. 'B-NP' / 'I-NP' from the chunker.
    parse = [tok.labels[0].value for tok in sent.tokens]
    toks = [tok.text for tok in sent.tokens]
    parse_filtered = []
    toks_filtered = []
    curr_tag = None   # tag of the span currently being accumulated
    span_text = []    # tokens of the span currently being accumulated
    for i in range(len(parse)):
        tok = toks[i]
        if tok.lower() not in stopwords:
            tag_parts = parse[i].split('-')
            if args.pos_templates:
                # POS tags lack the B-/I- chunk prefix; pad so that
                # tag_parts[1] is always the tag proper.
                tag_parts = [''] + tag_parts
            if len(tag_parts) > 1:
                this_tag = tag_parts[1]
                if this_tag == curr_tag:
                    # Same tag as the open span: keep merging.
                    span_text.append(tok)
                elif curr_tag is not None:
                    # Tag changed: flush the open span and start a new one.
                    parse_filtered.append(curr_tag)
                    toks_filtered.append(" ".join(span_text))
                    curr_tag = this_tag
                    span_text = [tok]
                else:
                    # No span open yet: start one.
                    curr_tag = this_tag
                    span_text = [tok]
            else:
                # Token without a usable tag: flush any open span, then emit
                # the token alone. curr_tag is None here, so the final pass
                # below substitutes the literal token into the template.
                if len(span_text) > 0:
                    parse_filtered.append(curr_tag)
                    toks_filtered.append(" ".join(span_text))
                    curr_tag = None
                    span_text = []
                parse_filtered.append(curr_tag)
                toks_filtered.append(tok)
        else:
            # Stopword: flush any open span, then emit the stopword itself
            # (or the 'STOP' class when requested).
            if len(span_text) > 0:
                parse_filtered.append(curr_tag)
                toks_filtered.append(" ".join(span_text))
                curr_tag = None
            parse_filtered.append('STOP' if use_stop_class else None)
            toks_filtered.append(tok)
            span_text = []
    # Flush a span left open at the end of the sentence.
    if len(span_text) > 0:
        parse_filtered.append(curr_tag)
        toks_filtered.append(" ".join(span_text))
    # None entries (stopwords / untagged tokens) fall back to the literal token.
    parse_filtered = [tag if tag is not None else toks_filtered[i] for i, tag in enumerate(parse_filtered)]
    return parse_filtered, toks_filtered
# Constituent types kept as whole, phrase-level chunks in templates.
NODES_TO_PRESERVE = ['NP', 'PP']
def tree_to_chunks(node):
    """Flatten an AllenNLP constituency tree into parallel (words, tags) lists.

    Nodes whose type is in NODES_TO_PRESERVE become a single chunk covering
    the entire phrase; other internal nodes are recursed into; leaves emit
    their own word and node type. Words found in the module-level
    ``stopwords`` list keep their literal form as the "tag" so templates
    retain them verbatim.
    """
    label = node['nodeType']
    if label in NODES_TO_PRESERVE:
        # Keep the full phrase as one chunk.
        return [node['word']], [label]
    if 'children' not in node:
        # Leaf: emit the token with its own node type.
        return [node['word']], [label]
    words = []
    tags = []
    for child in node['children']:
        child_words, child_tags = tree_to_chunks(child)
        words.extend(child_words)
        tags.extend(child_tags)
    # Stopwords survive verbatim in the template rather than as a tag.
    tags = [w if w in stopwords else t for w, t in zip(words, tags)]
    return words, tags
for split in splits:
# Check cache
# args.use_stop_class
# args.single_vocab
# args.pos_templates
# args.no_stopwords
# args.extended_stopwords
if args.dataset == 'wa-triples':
with jsonlines.open(os.path.join(args.data_dir, f'wikianswers-pp/{split}.jsonl')) as f:
all_clusters = [x for x in f]
else:
with jsonlines.open(os.path.join(args.data_dir, f'{args.dataset}/{split}.jsonl')) as f:
all_clusters = [x for x in f]
samples = []
rebuild_cache = True
cache_file = os.path.join(args.data_dir, f"{dataset}-cache/{cache_key}_{split}.json")
if os.path.exists(cache_file):
print("Loading from cache")
with open(cache_file) as f:
cache = json.load(f)
vocab_by_pos = cache["vocab_by_pos"]
parses = cache['parses']
tokenised = cache['tokenised']
if "qs_by_templ" in cache:
rebuild_cache = False
qs_by_templ = cache['qs_by_templ']
else:
print('Cache is missing qs_by_templ, rebuilding')
if rebuild_cache:
print("Cache file missing, building")
parses = []
tokenised = []
vocab_by_pos = defaultdict(Counter)
qs_by_templ = defaultdict(list)
for cix,cluster in enumerate(tqdm(all_clusters)):
cluster_parses = []
cluster_tokenised = []
for q in (cluster['qs'] if 'qs' in cluster else cluster['paraphrases']):
if args.constit_templates:
res = predictor.predict_batch_json(
inputs=[{'sentence': q}]
)
toks, parse = tree_to_chunks(res[0]['hierplane_tree']['root'])
else:
sent = Sentence(q)
tagger.predict(sent)
parse, toks = get_chunk_parse(sent, use_stop_class=args.use_stop_class)
# print([tok.labels[0].value for tok in sent.tokens])
# print(parse)
# print(toks)
# exit()
for ix in range(len(parse)):
vocab_key = 'KEY' if args.single_vocab else parse[ix]
vocab_by_pos[vocab_key][toks[ix]] += 1
cluster_parses.append(" ".join(parse))
cluster_tokenised.append(toks)
qs_by_templ[" ".join(parse)].append(q)
parses.append(cluster_parses)
tokenised.append(cluster_tokenised)
if cix > 1000 and DEBUG:
break
vocab_by_pos = {tag: [(w,count) for w,count in vocab.items() if count > 1] for tag,vocab in vocab_by_pos.items()}
vocab_by_pos = {tag: sorted(vocab, reverse=True, key=lambda x: x[1])[:5000] for tag,vocab in vocab_by_pos.items()}
vocab_by_pos_size = {tag: sum([x[1] for x in vocab]) for tag,vocab in vocab_by_pos.items()}
vocab_by_pos = {tag: [(x[0],x[1]/vocab_by_pos_size[tag]) for x in vocab] for tag,vocab in vocab_by_pos.items()}
os.makedirs(os.path.join(args.data_dir, f"{dataset}-cache/"), exist_ok=True)
with open(cache_file, "w") as f:
json.dump({
"vocab_by_pos": vocab_by_pos,
"parses": parses,
"tokenised": tokenised,
"qs_by_templ": qs_by_templ
}, f)
max_cluster_len = max([len(c['qs'] if 'qs' in c else c['paraphrases']) for c in all_clusters])
max_q_len = max([len(toks) for c in tokenised for toks in c])
max_vocab_size = max([len(voc) for voc in vocab_by_pos.values()])
noising_randoms = np.random.rand(len(all_clusters), SAMPLES_PER_CLUSTER, max_q_len)
replace_randoms = np.random.rand(len(all_clusters), SAMPLES_PER_CLUSTER, max_q_len)
dropout_randoms = np.random.rand(len(all_clusters), SAMPLES_PER_CLUSTER)
# replace_randoms = np.random.randint(0, max_vocab_size, size=(len(all_clusters), SAMPLES_PER_CLUSTER, max_q_len))
sample_randoms = np.random.randint(0, max_cluster_len, size=(len(all_clusters), SAMPLES_PER_CLUSTER, 4))
# num_samples = SAMPLES_PER_CLUSTER if split == 'train' else 1
for cix,row in enumerate(tqdm(all_clusters)):
cluster = row['qs'] if 'qs' in row else row['paraphrases']
sample_size = SAMPLES_PER_CLUSTER if args.resample_cluster else min(SAMPLES_PER_CLUSTER, len(cluster)-1)
for i in range(sample_size):
# tgt_ix, sem_ix = np.random.choice(len(cluster), replace=False, size=2)
tgt_ix, sem_ix, parse_ix, exemplar_ix = sample_randoms[cix][i]
tgt_ix = tgt_ix % len(cluster)
tgt_txt = cluster[tgt_ix]
if args.template_dropout > 0 and dropout_randoms[cix, i] < args.template_dropout:
tgt_parse = parses[cix][parse_ix % len(cluster)]
else:
tgt_parse = parses[cix][tgt_ix]
sem_options = [cluster[i] for i in range(len(cluster)) if parses[cix][tgt_ix] != parses[cix][i]]
if len(sem_options) > 0: # and args.use_diff_templ_for_sem
sem_ix = sem_ix % len(sem_options)
sem_text = sem_options[sem_ix]
else:
sem_ix = sem_ix % len(cluster)
sem_text = cluster[sem_ix]
toks = tokenised[cix][tgt_ix]
exemplar_options = []
if args.real_exemplars and tgt_parse in qs_by_templ:
exemplar_options = deepcopy(qs_by_templ[tgt_parse])
# remove any exemplar from this cluster
for q in cluster:
if q in exemplar_options:
exemplar_options.remove(q)
if len(exemplar_options) > 0:
syn_text = exemplar_options[exemplar_ix % len(exemplar_options)]
else:
# Build an exemplar by noising
syn_text = []
j=0
for tok, tag in zip(toks, tgt_parse.split(' ')):
if tag not in tags_to_preserve and tok.lower() not in stopwords and noising_randoms[cix,i,j] > KEEP_FRACTION:
# replacement = np.random.choice(list(vocab_by_pos[tag]))
vocab_key = 'KEY' if args.single_vocab else tag
if len(vocab_by_pos[vocab_key]) > 0:
options, probs = zip(*vocab_by_pos[vocab_key])
# repl_ix = replace_randoms[cix,i,j] % len(options)
cum_prob = 0
for k in range(len(probs)):
cum_prob += 1/len(probs) if args.uniform_sampling else probs[k]
if cum_prob > replace_randoms[cix,i,j]:
repl_ix = k
break
syn_text.append(options[repl_ix])
else:
syn_text.append(tok)
else:
syn_text.append(tok)
j += 1
syn_text = " ".join(syn_text)
samples.append({
'tgt': tgt_txt,
'sem_input': sem_text,
'syn_input': syn_text
})
# print(samples[-1])
# break
# break
if cix > 100 and DEBUG:
# print(vocab_by_pos)
# print(samples[:10])
break
# break
with jsonlines.open(f'{args.data_dir}/{name_slug}/{split}.jsonl','w') as f:
f.write_all(samples) | 15,891 | 37.293976 | 1,024 | py |
separator | separator-main/scripts/train_vq_code_predictor.py | # MLP code prediction
import argparse, json, os
parser = argparse.ArgumentParser(
description="MLP code prediction trainer",
)
parser.add_argument(
"--data_dir", type=str, default='./data/', help="Path to data folder"
)
parser.add_argument(
"--model_path", type=str, default='./runs/sep_ae/20201230_132811_vae_wa_6h_quantized_256_16qh_chunk-drop30/', help="Path to model folder"
)
parser.add_argument(
"--output_path", type=str, default='./runs/mlpcodepredictor/', help="Path to output folder"
)
parser.add_argument(
"--dataset", type=str, default='wikianswers', help="Which dataset?"
)
parser.add_argument("--train", action="store_true", help="Train mode")
parser.add_argument("--eval", action="store_true", help="Eval mode")
parser.add_argument("--test", action="store_true", help="Eval on test")
parser.add_argument(
"--lr", type=float, default=1e-4
)
parser.add_argument(
"--bsz", type=int, default=1024
)
parser.add_argument(
"--codebook_size", type=int, default=0
)
parser.add_argument(
"--hidden_dim", type=int, default=768*4
)
parser.add_argument(
"--num_steps", type=int, default=30001
)
args = parser.parse_args()
if args.dataset == 'wikianswers':
dataset_all = 'wikianswers-para-allqs'
dataset_clusters = 'wikianswers-pp'
dataset_geneval = 'wikianswers-para-splitforgeneval'
dataset_mlppredict = 'wikianswers-para-exemplarmlppredict'
elif args.dataset == 'qqp':
dataset_all = 'qqp-allqs'
dataset_clusters = 'qqp-clusters'
dataset_geneval = 'qqp-splitforgeneval'
dataset_mlppredict = 'qqp-exemplarmlppredict'
import torch
from torch.autograd import Variable
from tqdm import tqdm
from torchseq.utils.functions import onehot
from torchseq.utils.seed import set_seed
class MLPClassifier(torch.nn.Module):
    """3-layer MLP predicting one of ``output_dim`` VQ codes per head.

    The network maps an ``input_dim`` encoding through two hidden layers of
    width ``input_dim * num_heads`` (ReLU + dropout 0.2) to
    ``output_dim * num_heads`` logits, reshaped to (batch, num_heads,
    output_dim).

    NOTE(review): ``hidden_dim`` is accepted but never used -- all layer
    widths derive from ``input_dim * num_heads``. Kept as-is so existing
    checkpoints and call sites remain compatible.
    """

    def __init__(self, input_dim, output_dim, hidden_dim, num_heads):
        super(MLPClassifier, self).__init__()
        self.linear = torch.nn.Linear(input_dim, input_dim * num_heads)
        self.linear2 = torch.nn.Linear(input_dim * num_heads, input_dim * num_heads)
        self.linear3 = torch.nn.Linear(input_dim * num_heads, output_dim * num_heads)
        self.drop1 = torch.nn.Dropout(p=0.2)
        self.drop2 = torch.nn.Dropout(p=0.2)
        self.num_heads = num_heads
        self.output_dim = output_dim

    def forward(self, x):
        """Return per-head logits of shape (batch, num_heads, output_dim)."""
        outputs = self.drop1(torch.nn.functional.relu(self.linear(x)))
        outputs = self.drop2(torch.nn.functional.relu(self.linear2(outputs)))
        outputs = self.linear3(outputs)
        return outputs.reshape(-1, self.num_heads, self.output_dim)

    def predict(self, encodings):
        """Return the argmax code list (one code per head) for each encoding.

        Args:
            encodings: iterable of 1-D encodings (anything ``torch.tensor``
                accepts). Each is run through the model one at a time on GPU.

        Returns:
            A list with one ``[code_head_0, ..., code_head_{H-1}]`` list per
            input encoding.
        """
        # Bug fixes vs. original: the accumulator was declared as `vq_code`
        # but appended/returned as `vq_codes` (NameError on first use); the
        # forward pass went through the *global* `model` instead of `self`;
        # and `predicted[0]` kept only head 0 instead of all heads.
        vq_codes = []
        for X in encodings:
            inputs = Variable(torch.tensor([X])).cuda()
            outputs = self(inputs)[0]
            predicted = torch.argmax(torch.softmax(outputs, -1), -1)
            vq_codes.append(predicted.cpu().tolist())
        return vq_codes
os.makedirs(args.output_path, exist_ok=True)
with open(args.output_path + '/config.json', 'w') as f:
json.dump(vars(args), f)
import numpy as np
import jsonlines, os
# Load encodings, data
MODEL_PATH = args.model_path
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_train.npy') or not os.path.exists(MODEL_PATH+f'/sep_encoding_1_dev.npy') or not os.path.exists(MODEL_PATH+f'/sep_encoding_1_test.npy'):
# generate encodings
print('Encoding cache not found - generating...')
import json, torch, jsonlines
from tqdm import tqdm
import numpy as np
from torchseq.agents.para_agent import ParaphraseAgent
from torchseq.datasets.json_loader import JsonDataLoader
from torchseq.utils.config import Config
with open(MODEL_PATH + "/config.json") as f:
cfg_dict = json.load(f)
# cfg_dict["task"] = "autoencoder"
cfg_dict["env"]["data_path"] = args.data_dir
cfg_dict["eval"]["sample_outputs"] = False
cfg_dict["training"]['batch_size'] = 24
cfg_dict["eval"]['eval_batch_size'] = 24
cfg_dict["training"]["dataset"] = 'json'
cfg_dict["training"]["shuffle_data"] = False
cfg_dict['json_dataset'] = {
"path": dataset_all,
"field_map": [
{
"type": "copy",
"from": "q",
"to": "s2"
},
{
"type": "copy",
"from": "q",
"to": "s1"
}
]
}
cfg_dict["bottleneck"]["prior_var_weight"] = 0.0
config = Config(cfg_dict)
checkpoint_path = MODEL_PATH
data_loader = JsonDataLoader(config)
instance = ParaphraseAgent(config=config, run_id=None, output_path="./runs/parademo/", silent=False, verbose=False)
if os.path.exists(os.path.join(MODEL_PATH, "orig_model.txt")):
with open(os.path.join(MODEL_PATH, "orig_model.txt")) as f:
chkpt_pth = f.readlines()[0]
checkpoint_path = chkpt_pth
else:
checkpoint_path = os.path.join(MODEL_PATH, "model", "checkpoint.pt")
instance.load_checkpoint(checkpoint_path)
instance.model.eval()
# Train
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_train.npy'):
_, _, _, memory_train = instance.inference(data_loader.train_loader, memory_keys_to_return=['sep_encoding_1', 'sep_encoding_2','vq_codes'])
torch.cuda.empty_cache()
for mem_key in ['sep_encoding_1', 'sep_encoding_2','vq_codes']:
np.save(MODEL_PATH+f'/{mem_key}_train.npy', memory_train[mem_key])
# Dev
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_dev.npy'):
_, _, _, memory_dev = instance.inference(data_loader.valid_loader, memory_keys_to_return=['sep_encoding_1', 'sep_encoding_2','vq_codes'])
torch.cuda.empty_cache()
for mem_key in ['sep_encoding_1', 'sep_encoding_2','vq_codes']:
np.save(MODEL_PATH+f'/{mem_key}_dev.npy', memory_dev[mem_key])
# Test
if not os.path.exists(MODEL_PATH+f'/sep_encoding_1_test.npy'):
_, _, _, memory_test = instance.inference(data_loader.test_loader, memory_keys_to_return=['sep_encoding_1', 'sep_encoding_2','vq_codes'])
torch.cuda.empty_cache()
for mem_key in ['sep_encoding_1', 'sep_encoding_2','vq_codes']:
np.save(MODEL_PATH+f'/{mem_key}_test.npy', memory_test[mem_key])
del instance
del data_loader
torch.cuda.empty_cache()
print('Encoding cache built')
# Now actually load the encodings
print('Loading encodings, data')
memory_train = {}
memory_dev = {}
memory_test = {}
for mem_key in ['sep_encoding_1', 'sep_encoding_2', 'vq_codes']:
memory_train[mem_key] = np.load(MODEL_PATH+f'/{mem_key}_train.npy')
if args.test:
memory_test[mem_key] = np.load(MODEL_PATH+f'/{mem_key}_test.npy')
else:
memory_dev[mem_key] = np.load(MODEL_PATH+f'/{mem_key}_dev.npy')
with jsonlines.open(os.path.join(args.data_dir, dataset_clusters, "train.jsonl")) as f:
train_qs = [row for row in f]
train_cluster_ixs = []
ix = 0
for cix, cluster in enumerate(train_qs):
clen = len(cluster['qs'])
for i in range(clen):
cluster_ixs = list(range(ix, ix+clen))
# if args.dataset != 'qqp':
cluster_ixs.remove(ix + i)
train_cluster_ixs.append(cluster_ixs)
ix += clen
with jsonlines.open(os.path.join(args.data_dir, dataset_clusters, "dev.jsonl")) as f:
dev_qs = [row for row in f]
dev_cluster_ixs = []
ix = 0
for cix, cluster in enumerate(dev_qs):
clen = len(cluster['qs'])
for i in range(clen):
cluster_ixs = list(range(ix, ix+clen))
# if args.dataset != 'qqp':
cluster_ixs.remove(ix + i)
dev_cluster_ixs.append(cluster_ixs)
ix += clen
import sys, gc
gc.collect()
# print('mem train', sum([x.nbytes for x in memory_train.values()])/1024**2)
# print('mem dev', sum([x.nbytes for x in memory_dev.values()])/1024**2)
# print('mem test', sum([x.nbytes for x in memory_test.values()])/1024**2)
# print('qs train', sys.getsizeof(train_qs)/1024**2)
# print('qs dev', sys.getsizeof(dev_qs)/1024**2)
# print('clusters train', sys.getsizeof(train_cluster_ixs)/1024**2)
# print('clusters dev', sys.getsizeof(dev_cluster_ixs)/1024**2)
print('Data and encodings loaded')
# from guppy import hpy;
# h=hpy()
# h.heap()
# Prepare datasets
print('Prepping dataset')
h_ix = 0
X = np.concatenate([memory_train['sep_encoding_1'][:, 0, :], memory_train['sep_encoding_2'][:, 0, :]], axis=1)
y = memory_train['vq_codes'][:, :, 0]
# print(y[:10, :])
# print(len(train_qs))
# print(X.shape)
# print(len(train_cluster_ixs))
# X_train_ixs = []
# y_train_ixs = []
# for src_ix, cluster in enumerate(train_cluster_ixs):
# for tgt_ix in cluster:
# X_train_ixs.append(src_ix)
# y_train_ixs.append(tgt_ix)
# X_dev_ixs = []
# y_dev_ixs = []
# for src_ix, cluster in enumerate(dev_cluster_ixs[:1000]):
# for tgt_ix in cluster:
# X_dev_ixs.append(src_ix)
# y_dev_ixs.append(tgt_ix)
if args.test:
# X_dev = memory_dev['sep_encoding_1'][:, 0, :]
X_test = np.concatenate([memory_test['sep_encoding_1'][:, 0, :], memory_test['sep_encoding_2'][:, 0, :]], axis=1)
y_test = memory_test['vq_codes'][:, :, 0]
else:
# X_dev = memory_dev['sep_encoding_1'][:, 0, :]
X_dev = np.concatenate([memory_dev['sep_encoding_1'][:, 0, :], memory_dev['sep_encoding_2'][:, 0, :]], axis=1)
y_dev = memory_dev['vq_codes'][:, :, 0]
print('Datasets prepped')
# Train the model
batch_size = args.bsz
NUM_STEPS = args.num_steps
NUM_HEADS = 4
input_dim = 768 * 4//4
output_dim = args.codebook_size
hidden_dim = args.hidden_dim
lr_rate = args.lr
set_seed(123)
model = MLPClassifier(input_dim, output_dim, hidden_dim, NUM_HEADS).cuda()
if args.train:
print('Training model...')
criterion = torch.nn.CrossEntropyLoss().cuda() # computes softmax and then the cross entropy
optimizer = torch.optim.Adam(model.parameters(), lr=lr_rate)
rand_ixs = np.random.randint(0, high=len(train_cluster_ixs), size=(NUM_STEPS, batch_size))
best_acc = 0
for iter in tqdm(range(NUM_STEPS)):
# batch_ixs = np.random.choice(len(train_cluster_ixs), size=batch_size)
model.train()
batch_ixs = rand_ixs[iter,:]
inputs = Variable(torch.tensor([X[ix] for ix in batch_ixs])).cuda()
# print([len(train_cluster_ixs[cix]) for cix in batch_ixs])
tgt = torch.where(torch.cat([torch.sum(torch.cat([onehot(torch.tensor(y[ix]), N=output_dim).unsqueeze(0) for ix in train_cluster_ixs[cix]], dim=0), dim=0, keepdims=True) for cix in batch_ixs], dim=0) > 0, 1, 0).cuda()
# tgt = Variable(tgt).cuda()
optimizer.zero_grad()
outputs = model(inputs)
# loss = criterion(outputs, labels)
loss = torch.sum(-1 * torch.nn.functional.log_softmax(outputs, dim=-1) * tgt/tgt.sum(dim=-1, keepdims=True), dim=-1).mean() #
loss.backward()
optimizer.step()
if iter%1000==0:
model.eval()
# calculate Accuracy
correct = 0
all_acc = 0
head_acc = [0] * NUM_HEADS
total = 0
for x_ix, cluster in enumerate(train_cluster_ixs[:10000]):
inputs = Variable(torch.tensor([X[x_ix]])).cuda()
labels = cluster
outputs = model(inputs)
predicted = torch.argmax(outputs.data, -1).cpu()
total+= inputs.size(0)
# for gpu, bring the predicted and labels back to cpu fro python operations to work
# print(predicted, [y[ix] for ix in cluster])
all_correct = True
for h_ix in range(NUM_HEADS):
this_corr = (predicted[0, h_ix] in [y[ix, h_ix] for ix in cluster])
correct+= 1.0 * this_corr
head_acc[h_ix] += 1.0 * this_corr
all_correct = all_correct & this_corr
all_acc += 1.0 * all_correct
accuracy = 100 * correct/(total*NUM_HEADS)
head_acc = [100*x/total for x in head_acc]
all_accuracy = 100 * all_acc/total
if accuracy > best_acc:
print('Saving...')
torch.save(model.state_dict(), args.output_path+'/code_predict.pt')
best_acc = accuracy
metrics = {
'acc': accuracy,
'full_acc': all_accuracy,
'head_acc': head_acc
}
with open(args.output_path + '/metrics.json', 'w') as f:
json.dump(metrics, f)
print("Iteration: {}. Loss: {}. Recall: {}. All Recall {}. PerHead Recall {}".format(iter, loss.item(), accuracy, all_accuracy, head_acc))
print('Training complete')
# Run inference
if args.eval or args.test:
split = 'test' if args.test else 'dev'
print('Generating exemplars')
import jsonlines, os, copy
from tqdm import tqdm
NUM_HEADS = 16
NUM_TEMPL_HEADS = 4
model.load_state_dict(torch.load(args.output_path+'/code_predict.pt'))
model.eval()
with jsonlines.open(os.path.join(args.data_dir, f"{dataset_geneval}/{split}.jsonl")) as f:
rows = [row for row in f]
q_to_ix = {}
ix = 0
with jsonlines.open(os.path.join(args.data_dir, f"{dataset_clusters}/{split}.jsonl")) as f:
dev_qs = [row for row in f]
for cix, cluster in enumerate(dev_qs):
for q in cluster['qs']:
q_to_ix[q] = ix
ix += 1
miss = 0
# os.makedirs(args.data_dir + '/wikianswers-para-exemplarmlppredict', exist_ok=True)
# with jsonlines.open(args.data_dir + '/wikianswers-para-exemplarmlppredict/dev.jsonl', 'w') as f:
# for ix, row in enumerate(tqdm(rows)):
# query_ix = q_to_ix[row['sem_input']]
# tgt_codes = [0] * (NUM_HEADS - NUM_TEMPL_HEADS)
# inputs = Variable(torch.tensor([X_dev[query_ix]])).cuda()
# outputs = model(inputs)
# predicted = torch.argmax(outputs.data, -1).cpu()
# gold = y_dev[ix]
# # print(predicted, gold)
# for h_ix in range(NUM_TEMPL_HEADS):
# tgt_codes.append(predicted[0, h_ix].item())
# this_row = copy.copy(row)
# this_row['vq_codes'] = tgt_codes
# f.write(this_row)
X_src = X_test if args.test else X_dev
os.makedirs(args.data_dir + '/' + dataset_mlppredict, exist_ok=True)
with jsonlines.open(args.data_dir + '/' + dataset_mlppredict +f'/{split}.jsonl', 'w') as f:
for ix, row in enumerate(tqdm(rows)):
query_ix = q_to_ix[row['sem_input']]
tgt_codes = [0] * (NUM_HEADS - NUM_TEMPL_HEADS)
inputs = Variable(torch.tensor([X_src[query_ix]])).cuda()
outputs = model(inputs)[0]
probs, predicted = torch.topk(torch.softmax(outputs, -1), 3 -1)
# print(predicted.shape, probs.shape)
# break
joint_probs = [([], 0)]
for h_ix in range(NUM_TEMPL_HEADS):
new_hypotheses = []
for i, (combo, prob) in enumerate(joint_probs):
for k in range(2):
new_hyp = [copy.copy(combo), prob]
new_hyp[0].append(predicted[h_ix, k].item())
new_hyp[1] += torch.log(probs[h_ix, k]).item()
new_hypotheses.append(new_hyp)
joint_probs = new_hypotheses
joint_probs = sorted(joint_probs, key=lambda x: x[1], reverse=True)[:3]
pred_codes = [tgt_codes + x[0] for x in sorted(joint_probs, key=lambda x: x[1], reverse=True)[:2]]
# pred_codes = predicted.transpose(1,0).tolist()
# pred_codes = [tgt_codes + codes for codes in pred_codes]
# print(pred_codes)
# exit()
# for h_ix in range(NUM_TEMPL_HEADS):
# tgt_codes.append(predicted[0, h_ix].item())
for codes in pred_codes:
this_row = copy.copy(row)
this_row['vq_codes'] = codes
f.write(this_row)
| 16,433 | 32.133065 | 225 | py |
linmix | linmix-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# linmix documentation build configuration file, created by
# sphinx-quickstart on Tue May 12 10:44:33 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
def __mul__(self, other):
return Mock()
def __rmul__(self, other):
return Mock()
def __pow__(self, other):
return Mock()
def __div__(self, other):
return Mock()
MOCK_MODULES = [
'numpy',
]
if on_rtd:
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
# 'sphinx.ext.viewcode',
# 'sphinxcontrib.napoleon',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'linmix'
copyright = u'2015, Joshua E. Meyers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0.dev1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# NOTE(review): Sphinx warns at build time if the '_static' directory is absent.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'linmixdoc'
# -- Options for LaTeX output ---------------------------------------------
# All keys are commented out, so Sphinx's LaTeX defaults apply.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'linmix.tex', u'linmix Documentation',
     u'Joshua E. Meyers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 places the page among user commands.
man_pages = [
    ('index', 'linmix', u'linmix Documentation',
     [u'Joshua E. Meyers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' is the sphinx-quickstart
# template placeholder — replace with a real description.
texinfo_documents = [
    ('index', 'linmix', u'linmix Documentation',
     u'Joshua E. Meyers', 'linmix', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 8,902 | 29.806228 | 98 | py |
leela-zero | leela-zero-master/training/elf/elf_convert.py | #!/usr/bin/env python3
import numpy as np
import sys
import torch
# Load the ELF checkpoint given as the first CLI argument; ELF checkpoints
# keep the network parameters under the 'state_dict' key.
net = torch.load(sys.argv[1])
state = net['state_dict']
def tensor_to_str(t):
    """Flatten *t* (anything ``np.array`` accepts) into one space-separated line."""
    flat = np.array(t).flatten()
    return ' '.join(str(v) for v in flat)
def convert_block(t, name):
    """Fold the batchnorm following conv *name* into the conv parameters.

    Reads the ``<name>.0.*`` (convolution) and ``<name>.1.*`` (batchnorm)
    entries from the state-dict-like mapping *t* and returns
    ``[weight, bias, bn_mean, bn_var]``: gamma is folded into the conv weight
    and into the running mean, and beta is pre-scaled by ``sqrt(var + eps)``
    (the layout leela-zero's in-engine normalization expects).
    """
    conv_w = np.array(t[name + '.0.weight'])
    conv_b = np.array(t[name + '.0.bias'])
    gamma = np.array(t[name + '.1.weight'])
    beta = np.array(t[name + '.1.bias'])
    running_mean = np.array(t[name + '.1.running_mean'])
    running_var = np.array(t[name + '.1.running_var'])
    # Conv weight layout is [out, in, x, y]: scale each output channel by
    # its batchnorm gamma.
    conv_w = conv_w * gamma[:, np.newaxis, np.newaxis, np.newaxis]
    conv_b = gamma * conv_b + beta * np.sqrt(running_var + 1e-5)
    running_mean = running_mean * gamma
    return [conv_w, conv_b, running_mean, running_var]
def write_block(f, b):
    """Write each array in *b* to *f* as one line of space-separated values."""
    for arr in b:
        f.write(' '.join(str(v) for v in arr.flatten()) + '\n')
# Debug helper: flip to `if 1:` to dump every tensor name/shape in the checkpoint.
if 0:
    for key in state.keys():
        print(key, state[key].shape)
# Emit the converted network in leela-zero's plain-text weight format.
# The write order below must match the order the engine reads the file in.
with open('elf_converted_weights.txt', 'w') as f:
    # version 2 means value head is for black, not for side to move
    f.write('2\n')
    # Input convolution: key prefix differs depending on whether the
    # checkpoint was saved with or without a DataParallel-style '.module' wrapper.
    if 'init_conv.0.weight' in state:
        b = convert_block(state, 'init_conv')
    else:
        b = convert_block(state, 'init_conv.module')
    # Permutate input planes
    # (reorders ELF's input-plane convention into leela-zero's).
    p = [0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15, 16, 17]
    b[0] = b[0][:, p, :, :]
    write_block(f, b)
    # 20 residual blocks, two convolutions each.
    for block in range(20):
        b = convert_block(state, 'resnet.module.resnet.{}.conv_lower'.format(block))
        write_block(f, b)
        b = convert_block(state, 'resnet.module.resnet.{}.conv_upper'.format(block))
        write_block(f, b)
    # Policy head: final conv followed by a fully-connected layer.
    b = convert_block(state, 'pi_final_conv')
    write_block(f, b)
    f.write(tensor_to_str(state['pi_linear.weight']) + '\n')
    f.write(tensor_to_str(state['pi_linear.bias']) + '\n')
    # Value head: final conv followed by two fully-connected layers.
    b = convert_block(state, 'value_final_conv')
    write_block(f, b)
    f.write(tensor_to_str(state['value_linear1.weight']) + '\n')
    f.write(tensor_to_str(state['value_linear1.bias']) + '\n')
    f.write(tensor_to_str(state['value_linear2.weight']) + '\n')
    f.write(tensor_to_str(state['value_linear2.bias']) + '\n')
| 2,191 | 30.314286 | 84 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./train.py | import argparse
import importlib.util
import torch
from iharm.utils.exp import init_experiment
def main():
    """Entry point: parse CLI args, load the model script, set up the experiment, run it."""
    cli_args = parse_args()
    script = load_module(cli_args.model_path)
    experiment_cfg = init_experiment(cli_args)
    # Enable the cudnn autotuner and file-system tensor sharing for dataloader workers.
    torch.backends.cudnn.benchmark = True
    torch.multiprocessing.set_sharing_strategy('file_system')
    script.main(experiment_cfg)
def parse_args():
    """Build and parse the command-line arguments for a training run."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('model_path', type=str,
        help='Path to the model script.')
    add('--exp-name', type=str, default='',
        help='Here you can specify the name of the experiment. '
             'It will be added as a suffix to the experiment folder.')
    add('--workers', type=int, default=4, metavar='N',
        help='Dataloader threads.')
    add('--batch-size', type=int, default=-1,
        help='You can override model batch size by specify positive number.')
    add('--ngpus', type=int, default=1,
        help='Number of GPUs. '
             'If you only specify "--gpus" argument, the ngpus value will be calculated automatically. '
             'You should use either this argument or "--gpus".')
    add('--gpus', type=str, default='', required=False,
        help='Ids of used GPUs. You should use either this argument or "--ngpus".')
    add('--resume-exp', type=str, default=None,
        help='The prefix of the name of the experiment to be continued. '
             'If you use this field, you must specify the "--resume-prefix" argument.')
    add('--resume-prefix', type=str, default='latest',
        help='The prefix of the name of the checkpoint to be loaded.')
    add('--start-epoch', type=int, default=0,
        help='The number of the starting epoch from which training will continue. '
             '(it is important for correct logging and learning rate)')
    add('--weights', type=str, default=None,
        help='Model weights will be loaded from the specified path if you use this argument.')
    add('--dataset_path', type=str, default=None,
        help='')
    # NOTE(review): the --train_list/--val_list help strings look copy-pasted
    # from --weights (kept verbatim to preserve CLI output).
    add('--train_list', type=str, default=None,
        help='Model weights will be loaded from the specified path if you use this argument.')
    add('--val_list', type=str, default=None,
        help='Model weights will be loaded from the specified path if you use this argument.')
    return parser.parse_args()
def load_module(script_path):
    """Import the Python file at *script_path* and return it as a module object."""
    spec = importlib.util.spec_from_file_location("model_script", script_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
# Allow `python train.py <model_path> ...` to launch training directly.
if __name__ == '__main__':
    main()
| 3,105 | 36.878049 | 120 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./models/fixed256/improved_ssam.py | from functools import partial
import torch
from torchvision import transforms
from easydict import EasyDict as edict
from albumentations import HorizontalFlip, Resize, RandomResizedCrop
from iharm.data.compose import ComposeDataset
from iharm.data.hdataset import HDataset, MyDataset
from iharm.data.transforms import HCompose
from iharm.engine.simple_trainer import SimpleHTrainer
from iharm.model import initializer
from iharm.model.base import SSAMImageHarmonization
from iharm.model.losses import MaskWeightedMSE
from iharm.model.metrics import DenormalizedMSEMetric, DenormalizedPSNRMetric
from iharm.utils.log import logger
def main(cfg):
    """Build the harmonization model for this experiment and run the training loop."""
    net, net_cfg = init_model(cfg)
    train(net, cfg, net_cfg, start_epoch=cfg.start_epoch)
def init_model(cfg):
    """Create the SSAM harmonization network plus its static configuration.

    Returns ``(model, model_cfg)``, where ``model_cfg`` carries the crop size,
    the normalization constants and the input transform used by the data
    pipeline.
    """
    net_cfg = edict()
    net_cfg.crop_size = (256, 256)
    # Standard ImageNet channel statistics.
    net_cfg.input_normalization = {
        'mean': [.485, .456, .406],
        'std': [.229, .224, .225]
    }
    net_cfg.depth = 4
    net_cfg.input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(net_cfg.input_normalization['mean'],
                             net_cfg.input_normalization['std']),
    ])
    net = SSAMImageHarmonization(
        depth=4, ch=32, image_fusion=True, attention_mid_k=0.5,
        attend_from=2, batchnorm_from=2
    )
    net.to(cfg.device)
    net.apply(initializer.XavierGluon(rnd_type='gaussian', magnitude=1.0))
    return net, net_cfg
def train(model, cfg, model_cfg, start_epoch=0):
    """Configure datasets, losses, optimizer and scheduler, then run training.

    Trains for up to 120 epochs with Adam (lr 1e-3, decayed 10x at epochs
    105 and 115), validating every 5th epoch.
    """
    # Fall back to batch size 32 when the CLI left it at the -1 sentinel.
    cfg.batch_size = 32 if cfg.batch_size < 1 else cfg.batch_size
    cfg.val_batch_size = cfg.batch_size
    cfg.input_normalization = model_cfg.input_normalization
    crop_size = model_cfg.crop_size
    loss_cfg = edict()
    loss_cfg.pixel_loss = MaskWeightedMSE()
    loss_cfg.pixel_loss_weight = 1.0
    num_epochs = 120
    # Only resizing is applied; random crop/flip augmentation is disabled.
    train_augmentator = HCompose([
        #RandomResizedCrop(*crop_size, scale=(0.5, 1.0)),
        #HorizontalFlip()
        Resize(*crop_size)
    ])
    val_augmentator = HCompose([
        Resize(*crop_size)
    ])
    # The string below is the disabled iHarmony4 dataset configuration kept
    # for reference; the active configuration (MyDataset) follows it.
    '''
    trainset = ComposeDataset(
        [
            HDataset(cfg.HFLICKR_PATH, split='train'),
            HDataset(cfg.HDAY2NIGHT_PATH, split='train'),
            HDataset(cfg.HCOCO_PATH, split='train'),
            HDataset(cfg.HADOBE5K_PATH, split='train'),
        ],
        augmentator=train_augmentator,
        input_transform=model_cfg.input_transform,
        keep_background_prob=0.05,
    )
    valset = ComposeDataset(
        [
            HDataset(cfg.HFLICKR_PATH, split='test'),
            HDataset(cfg.HDAY2NIGHT_PATH, split='test'),
            HDataset(cfg.HCOCO_PATH, split='test'),
        ],
        augmentator=val_augmentator,
        input_transform=model_cfg.input_transform,
        keep_background_prob=-1,
    )
    '''
    trainset = ComposeDataset(
        [
            MyDataset(cfg.train_list, cfg.dataset_path),
        ],
        augmentator=train_augmentator,
        input_transform=model_cfg.input_transform,
        keep_background_prob=0.05,
    )
    valset = ComposeDataset(
        [
            MyDataset(cfg.val_list, cfg.dataset_path),
        ],
        augmentator=val_augmentator,
        input_transform=model_cfg.input_transform,
        keep_background_prob=-1,
    )
    optimizer_params = {
        'lr': 1e-3,
        'betas': (0.9, 0.999), 'eps': 1e-8
    }
    lr_scheduler = partial(torch.optim.lr_scheduler.MultiStepLR,
                           milestones=[105, 115], gamma=0.1)
    trainer = SimpleHTrainer(
        model, cfg, model_cfg, loss_cfg,
        trainset, valset,
        optimizer='adam',
        optimizer_params=optimizer_params,
        lr_scheduler=lr_scheduler,
        # PSNR/MSE are computed on denormalized images (hence the mean/std
        # tensors reshaped to broadcast over NCHW batches).
        metrics=[
            DenormalizedPSNRMetric(
                'images', 'target_images',
                mean=torch.tensor(cfg.input_normalization['mean'], dtype=torch.float32).view(1, 3, 1, 1),
                std=torch.tensor(cfg.input_normalization['std'], dtype=torch.float32).view(1, 3, 1, 1),
            ),
            DenormalizedMSEMetric(
                'images', 'target_images',
                mean=torch.tensor(cfg.input_normalization['mean'], dtype=torch.float32).view(1, 3, 1, 1),
                std=torch.tensor(cfg.input_normalization['std'], dtype=torch.float32).view(1, 3, 1, 1),
            )
        ],
        checkpoint_interval=10,
        image_dump_interval=1000
    )
    logger.info(f'Starting Epoch: {start_epoch}')
    logger.info(f'Total Epochs: {num_epochs}')
    for epoch in range(start_epoch, num_epochs):
        trainer.training(epoch)
        if (epoch % 5 ==0):
            trainer.validation(epoch)
| 4,667 | 29.913907 | 106 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/engine/simple_trainer.py | import os
import logging
from copy import deepcopy
from collections import defaultdict
import argparse
import cv2
import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torchvision.transforms import Normalize
from iharm.utils.log import logger, TqdmToLogger, SummaryWriterAvg
from iharm.utils.misc import save_checkpoint, load_weights
from .optimizer import get_optimizer
from flownet import *
from flownet.resample2d_package.resample2d import Resample2d
class SimpleHTrainer(object):
    """Training/validation driver for image harmonization models.

    Besides the per-frame pixel loss, it enforces temporal consistency: each
    batch carries the current frame ('images'/'masks') and the previous frame
    ('images_pre'/'masks_pre'); FlowNet2 estimates optical flow between them
    and the previous prediction is warped onto the current frame for an L2
    consistency term (see ``batch_forward``).
    """

    def __init__(self, model, cfg, model_cfg, loss_cfg,
                 trainset, valset,
                 optimizer='adam',
                 optimizer_params=None,
                 image_dump_interval=200,
                 checkpoint_interval=1,
                 tb_dump_period=25,
                 max_interactive_points=0,
                 lr_scheduler=None,
                 metrics=None,
                 additional_val_metrics=None,
                 net_inputs=('images', 'points')):
        # FlowNet2 expects these fields on its cfg; it is frozen
        # (requires_grad=False) and only used to estimate optical flow.
        cfg.rgb_max = 255.0
        cfg.fp16 = False
        self.flow_net = FlowNet2(cfg, requires_grad=False)
        # NOTE(review): hard-coded relative checkpoint path — training must be
        # launched from a directory containing ./pretrained_model/.
        checkpoint = torch.load("./pretrained_model/FlowNet2_checkpoint.pth.tar")
        self.flow_net.load_state_dict(checkpoint['state_dict'])
        self.flow_net = self.flow_net.to(cfg.device)
        self.flow_warp = Resample2d()
        self.flow_warp = self.flow_warp.to(cfg.device)
        # L2 criterion for the temporal-consistency term.
        self.L2=torch.nn.MSELoss().to(cfg.device)
        self.cfg = cfg
        self.model_cfg = model_cfg
        self.max_interactive_points = max_interactive_points
        self.loss_cfg = loss_cfg
        # Validation gets its own deep copy so loss state is not shared.
        self.val_loss_cfg = deepcopy(loss_cfg)
        self.tb_dump_period = tb_dump_period
        self.net_inputs = net_inputs
        if metrics is None:
            metrics = []
        self.train_metrics = metrics
        self.val_metrics = deepcopy(metrics)
        if additional_val_metrics is not None:
            self.val_metrics.extend(additional_val_metrics)
        self.checkpoint_interval = checkpoint_interval
        self.image_dump_interval = image_dump_interval
        self.task_prefix = ''
        # Tensorboard writer is created lazily on first training/validation call.
        self.sw = None
        self.trainset = trainset
        self.valset = valset
        self.train_data = DataLoader(
            trainset, cfg.batch_size, shuffle=True,
            drop_last=True, pin_memory=True,
            num_workers=cfg.workers
        )
        self.val_data = DataLoader(
            valset, cfg.val_batch_size, shuffle=False,
            drop_last=True, pin_memory=True,
            num_workers=cfg.workers
        )
        self.optim = get_optimizer(model, optimizer, optimizer_params)
        logger.info(model)
        self.device = cfg.device
        self.net = model
        self._load_weights()
        if cfg.multi_gpu:
            self.net = _CustomDP(self.net, device_ids=cfg.gpu_ids, output_device=cfg.gpu_ids[0])
        self.net = self.net.to(self.device)
        self.lr = optimizer_params['lr']
        if lr_scheduler is not None:
            self.lr_scheduler = lr_scheduler(optimizer=self.optim)
            # When resuming, fast-forward the scheduler to the resumed epoch.
            if cfg.start_epoch > 0:
                for _ in range(cfg.start_epoch):
                    self.lr_scheduler.step()
        else:
            self.lr_scheduler = None
        # Route tqdm progress bars through the project logger.
        self.tqdm_out = TqdmToLogger(logger, level=logging.INFO)
        if cfg.input_normalization:
            # Inverse of Normalize(mean, std), used when dumping visualizations.
            mean = torch.tensor(cfg.input_normalization['mean'], dtype=torch.float32)
            std = torch.tensor(cfg.input_normalization['std'], dtype=torch.float32)
            self.denormalizator = Normalize((-mean / std), (1.0 / std))
        else:
            self.denormalizator = lambda x: x

    def training(self, epoch):
        """Run one training epoch: forward/backward per batch, tensorboard
        logging, periodic visualization dumps and checkpointing."""
        if self.sw is None:
            self.sw = SummaryWriterAvg(log_dir=str(self.cfg.LOGS_PATH),
                                       flush_secs=10, dump_period=self.tb_dump_period)
        log_prefix = 'Train' + self.task_prefix.capitalize()
        tbar = tqdm(self.train_data, file=self.tqdm_out, ncols=100)
        train_loss = 0.0
        for metric in self.train_metrics:
            metric.reset_epoch_stats()
        self.net.train()
        for i, batch_data in enumerate(tbar):
            global_step = epoch * len(self.train_data) + i
            loss, losses_logging, splitted_batch_data, outputs = \
                self.batch_forward(batch_data)
            self.optim.zero_grad()
            loss.backward()
            self.optim.step()
            batch_loss = loss.item()
            train_loss += batch_loss
            for loss_name, loss_values in losses_logging.items():
                self.sw.add_scalar(tag=f'{log_prefix}Losses/{loss_name}',
                                   value=np.array(loss_values).mean(),
                                   global_step=global_step)
            self.sw.add_scalar(tag=f'{log_prefix}Losses/overall',
                               value=batch_loss,
                               global_step=global_step)
            # Let loss objects that track internal state log themselves.
            for k, v in self.loss_cfg.items():
                if '_loss' in k and hasattr(v, 'log_states') and self.loss_cfg.get(k + '_weight', 0.0) > 0:
                    v.log_states(self.sw, f'{log_prefix}Losses/{k}', global_step)
            if self.image_dump_interval > 0 and global_step % self.image_dump_interval == 0:
                with torch.no_grad():
                    self.save_visualization(splitted_batch_data, outputs, global_step, prefix='train')
            self.sw.add_scalar(tag=f'{log_prefix}States/learning_rate',
                               value=self.lr if self.lr_scheduler is None else self.lr_scheduler.get_lr()[-1],
                               global_step=global_step)
            tbar.set_description(f'Epoch {epoch}, training loss {train_loss/(i+1):.6f}')
            for metric in self.train_metrics:
                metric.log_states(self.sw, f'{log_prefix}Metrics/{metric.name}', global_step)
            # NOTE(review): `i % 1 == 0` is always true, so a full checkpoint
            # is written on EVERY step — likely a debugging leftover that will
            # fill the disk quickly.
            if i % 1 == 0:
                state_dict = self.net.module.state_dict() if self.cfg.multi_gpu else self.net.state_dict()
                step_store_name = "epoch_{}_step_{}.pth".format(epoch, i)
                step_store_name = self.cfg.CHECKPOINTS_PATH / step_store_name
                torch.save(state_dict, str(step_store_name))
            # NOTE(review): this kills the whole process after 30 steps —
            # clearly a debugging leftover; remove for real training runs.
            if (i + 1) % 30 == 0:
                exit()
        for metric in self.train_metrics:
            self.sw.add_scalar(tag=f'{log_prefix}Metrics/epoch_{metric.name}',
                               value=metric.get_epoch_value(),
                               global_step=epoch, disable_avg=True)
        # Always refresh the rolling 'last' checkpoint, plus a numbered one
        # every `checkpoint_interval` epochs.
        save_checkpoint(self.net, self.cfg.CHECKPOINTS_PATH, prefix=self.task_prefix,
                        epoch=None, multi_gpu=self.cfg.multi_gpu)
        if epoch % self.checkpoint_interval == 0:
            save_checkpoint(self.net, self.cfg.CHECKPOINTS_PATH, prefix=self.task_prefix,
                            epoch=epoch, multi_gpu=self.cfg.multi_gpu)
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()

    def validation(self, epoch):
        """Run one validation epoch (no gradient updates) and log epoch-level
        losses and metrics to tensorboard."""
        if self.sw is None:
            self.sw = SummaryWriterAvg(log_dir=str(self.cfg.LOGS_PATH),
                                       flush_secs=10, dump_period=self.tb_dump_period)
        log_prefix = 'Val' + self.task_prefix.capitalize()
        tbar = tqdm(self.val_data, file=self.tqdm_out, ncols=100)
        for metric in self.val_metrics:
            metric.reset_epoch_stats()
        num_batches = 0
        val_loss = 0
        losses_logging = defaultdict(list)
        self.net.eval()
        for i, batch_data in enumerate(tbar):
            global_step = epoch * len(self.val_data) + i
            loss, batch_losses_logging, splitted_batch_data, outputs = \
                self.batch_forward(batch_data, validation=True)
            for loss_name, loss_values in batch_losses_logging.items():
                losses_logging[loss_name].extend(loss_values)
            batch_loss = loss.item()
            val_loss += batch_loss
            num_batches += 1
            tbar.set_description(f'Epoch {epoch}, validation loss: {val_loss/num_batches:.6f}')
            for metric in self.val_metrics:
                metric.log_states(self.sw, f'{log_prefix}Metrics/{metric.name}', global_step)
        for loss_name, loss_values in losses_logging.items():
            self.sw.add_scalar(tag=f'{log_prefix}Losses/{loss_name}', value=np.array(loss_values).mean(),
                               global_step=epoch, disable_avg=True)
        for metric in self.val_metrics:
            self.sw.add_scalar(tag=f'{log_prefix}Metrics/epoch_{metric.name}', value=metric.get_epoch_value(),
                               global_step=epoch, disable_avg=True)
        self.sw.add_scalar(tag=f'{log_prefix}Losses/overall', value=val_loss / num_batches,
                           global_step=epoch, disable_avg=True)

    def batch_forward(self, batch_data, validation=False):
        """Forward one batch and assemble the total loss.

        Expects 'images'/'masks' for the current frame and 'images_pre'/
        'masks_pre' for the previous frame. Total loss = weighted pixel loss
        on the current frame + 0.01 * L2 between the current prediction and
        the flow-warped previous prediction (temporal consistency).
        Returns (loss, per-loss logging dict, device-side batch, net output).
        """
        metrics = self.val_metrics if validation else self.train_metrics
        losses_logging = defaultdict(list)
        # Gradients are disabled wholesale during validation.
        with torch.set_grad_enabled(not validation):
            batch_data = {k: v.to(self.device) for k, v in batch_data.items()}
            images, masks = batch_data['images'], batch_data['masks']
            images_pre, masks_pre = batch_data['images_pre'], batch_data['masks_pre']
            output = self.net(images, masks)
            output_pre=self.net(images_pre, masks_pre)
            # Optical flow from current to previous frame; the flow estimator
            # itself is frozen, so no gradients flow through it.
            with torch.no_grad():
                flow_i21 = self.flow_net(images, images_pre)
            # Warp the previous prediction onto the current frame.
            output2 = self.flow_warp(output_pre['images'], flow_i21)
            loss = 0.0
            loss = self.add_loss('pixel_loss', loss, losses_logging, validation, output, batch_data)
            loss+=0.01*self.L2(output['images'],output2)
            with torch.no_grad():
                for metric in metrics:
                    metric.update(
                        *(output.get(x) for x in metric.pred_outputs),
                        *(batch_data[x] for x in metric.gt_outputs)
                    )
        return loss, losses_logging, batch_data, output

    def add_loss(self, loss_name, total_loss, losses_logging, validation, net_outputs, batch_data):
        """Evaluate the configured loss *loss_name* (if its weight > 0),
        record its scalar value and return total_loss with it added."""
        loss_cfg = self.loss_cfg if not validation else self.val_loss_cfg
        loss_weight = loss_cfg.get(loss_name + '_weight', 0.0)
        if loss_weight > 0.0:
            loss_criterion = loss_cfg.get(loss_name)
            loss = loss_criterion(*(net_outputs.get(x) for x in loss_criterion.pred_outputs),
                                  *(batch_data[x] for x in loss_criterion.gt_outputs))
            loss = torch.mean(loss)
            losses_logging[loss_name].append(loss.item())
            loss = loss_weight * loss
            total_loss = total_loss + loss
        return total_loss

    def save_visualization(self, splitted_batch_data, outputs, global_step, prefix):
        """Dump a side-by-side JPEG (input | mask | target | prediction) of the
        first sample in the batch under VIS_PATH/<prefix>/."""
        output_images_path = self.cfg.VIS_PATH / prefix
        if self.task_prefix:
            output_images_path /= self.task_prefix
        if not output_images_path.exists():
            output_images_path.mkdir(parents=True)
        image_name_prefix = f'{global_step:06d}'

        def _save_image(suffix, image):
            # JPEG quality 85 keeps dumps small.
            cv2.imwrite(
                str(output_images_path / f'{image_name_prefix}_{suffix}.jpg'),
                image,
                [cv2.IMWRITE_JPEG_QUALITY, 85]
            )

        images = splitted_batch_data['images']
        target_images = splitted_batch_data['target_images']
        object_masks = splitted_batch_data['masks']
        # Take the first sample; denormalize and convert CHW -> HWC in [0,255].
        image, target_image, object_mask = images[0], target_images[0], object_masks[0, 0]
        image = (self.denormalizator(image).cpu().numpy() * 255).transpose((1, 2, 0))
        target_image = (self.denormalizator(target_image).cpu().numpy() * 255).transpose((1, 2, 0))
        # Replicate the single-channel mask to 3 channels for stacking.
        object_mask = np.repeat((object_mask.cpu().numpy() * 255)[:, :, np.newaxis], axis=2, repeats=3)
        predicted_image = (self.denormalizator(outputs['images'].detach()[0]).cpu().numpy() * 255).transpose((1, 2, 0))
        predicted_image = np.clip(predicted_image, 0, 255)
        viz_image = np.hstack((image, object_mask, target_image, predicted_image)).astype(np.uint8)
        # [:, :, ::-1] converts RGB -> BGR for cv2.imwrite.
        _save_image('reconstruction', viz_image[:, :, ::-1])

    def _load_weights(self):
        """Initialize network weights either from an explicit --weights file or
        from the unique checkpoint of a resumed experiment."""
        if self.cfg.weights is not None:
            if os.path.isfile(self.cfg.weights):
                load_weights(self.net, self.cfg.weights, verbose=True)
                # Clear so a later call cannot re-load stale weights.
                self.cfg.weights = None
            else:
                raise RuntimeError(f"=> no checkpoint found at '{self.cfg.weights}'")
        elif self.cfg.resume_exp is not None:
            print(self.cfg.resume_exp, self.cfg.CHECKPOINTS_PATH)
            checkpoints = list(self.cfg.CHECKPOINTS_PATH.glob(f'{self.cfg.resume_prefix}*.pth'))
            # Exactly one checkpoint must match the resume prefix.
            assert len(checkpoints) == 1
            checkpoint_path = checkpoints[0]
            print("load from", checkpoint_path)
            load_weights(self.net, str(checkpoint_path), verbose=True)
        self.net = self.net.to(self.device)
class _CustomDP(torch.nn.DataParallel):
    """DataParallel wrapper that forwards attribute lookups it cannot resolve
    itself to the wrapped module, so trainer code can access custom model
    attributes without unwrapping `.module` explicitly."""

    def __getattr__(self, name):
        try:
            return super().__getattr__(name)
        except AttributeError:
            # Fall back to the wrapped model's attribute.
            return getattr(self.module, name)
| 13,411 | 40.9125 | 119 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/engine/optimizer.py | import torch
import math
from iharm.utils.log import logger
def get_optimizer(model, opt_name, opt_kwargs):
    """Build an optimizer over *model*'s parameters, honoring per-parameter lr_mult.

    A trainable parameter carrying an ``lr_mult`` attribute (other than 1.0)
    gets its own parameter group with learning rate ``base_lr * lr_mult``;
    all other parameters use the optimizer defaults from *opt_kwargs*.

    :param opt_name: one of 'sgd', 'adam', 'adamw' (case-insensitive).
    """
    base_lr = opt_kwargs['lr']
    param_groups = []
    for param_name, parameter in model.named_parameters():
        group = {'params': [parameter]}
        if parameter.requires_grad:
            if not math.isclose(getattr(parameter, 'lr_mult', 1.0), 1.0):
                logger.info(f'Applied lr_mult={parameter.lr_mult} to "{param_name}" parameter.')
                group['lr'] = group.get('lr', base_lr) * parameter.lr_mult
        param_groups.append(group)
    optimizer_cls = {
        'sgd': torch.optim.SGD,
        'adam': torch.optim.Adam,
        'adamw': torch.optim.AdamW
    }[opt_name.lower()]
    return optimizer_cls(param_groups, **opt_kwargs)
| 797 | 27.5 | 82 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/utils/exp.py | import os
import sys
import shutil
import pprint
from pathlib import Path
from datetime import datetime
import yaml
import torch
from easydict import EasyDict as edict
from .log import logger, add_new_file_output_to_logger
def init_experiment(args):
    """Set up the experiment directory tree, config, GPUs and logging.

    Loads the config for the model script, merges CLI args into it, creates
    (or resumes) a numbered experiment folder with checkpoints/vis/logs
    subdirectories, snapshots the model script, resolves GPU ids and attaches
    a file handler to the shared logger. Returns the populated config.
    """
    model_path = Path(args.model_path)
    ftree = get_model_family_tree(model_path)
    if ftree is None:
        print('Models can only be located in the "models" directory in the root of the repository')
        sys.exit(1)
    cfg = load_config(model_path)
    update_config(cfg, args)
    # Experiments are grouped under EXPS_PATH mirroring the model's path
    # inside the "models" directory.
    experiments_path = Path(cfg.EXPS_PATH)
    exp_parent_path = experiments_path / '/'.join(ftree)
    exp_parent_path.mkdir(parents=True, exist_ok=True)
    if cfg.resume_exp:
        exp_path = find_resume_exp(exp_parent_path, cfg.resume_exp)
    else:
        # New experiment: next free 3-digit index, optionally suffixed by name.
        last_exp_indx = find_last_exp_indx(exp_parent_path)
        exp_name = f'{last_exp_indx:03d}'
        if cfg.exp_name:
            exp_name += '_' + cfg.exp_name
        exp_path = exp_parent_path / exp_name
        exp_path.mkdir(parents=True)
    cfg.EXP_PATH = exp_path
    cfg.CHECKPOINTS_PATH = exp_path / 'checkpoints'
    cfg.VIS_PATH = exp_path / 'vis'
    cfg.LOGS_PATH = exp_path / 'logs'
    cfg.LOGS_PATH.mkdir(exist_ok=True)
    cfg.CHECKPOINTS_PATH.mkdir(exist_ok=True)
    cfg.VIS_PATH.mkdir(exist_ok=True)
    # Snapshot the model script into the experiment dir for reproducibility.
    dst_script_path = exp_path / (model_path.stem + datetime.strftime(datetime.today(), '_%Y-%m-%d-%H-%M-%S.py'))
    shutil.copy(model_path, dst_script_path)
    # Either an explicit --gpus list, or the first --ngpus devices.
    if cfg.gpus != '':
        gpu_ids = [int(id) for id in cfg.gpus.split(',')]
    else:
        gpu_ids = list(range(cfg.ngpus))
        cfg.gpus = ','.join([str(id) for id in gpu_ids])
    cfg.gpu_ids = gpu_ids
    cfg.ngpus = len(gpu_ids)
    cfg.multi_gpu = cfg.ngpus > 1
    if cfg.multi_gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.gpus
        ngpus = torch.cuda.device_count()
        assert ngpus == cfg.ngpus
    cfg.device = torch.device(f'cuda:{cfg.gpu_ids[0]}')
    add_new_file_output_to_logger(cfg.LOGS_PATH, prefix='train_')
    logger.info(f'Number of GPUs: {len(cfg.gpu_ids)}')
    logger.info('Run experiment with config:')
    logger.info(pprint.pformat(cfg, indent=4))
    return cfg
def get_model_family_tree(model_path, terminate_name='models'):
    """Collect path components from *model_path* up to (excluding) the
    *terminate_name* ancestor directory, ordered top-down.

    Returns None when *terminate_name* is not an ancestor of *model_path*.
    """
    lineage = [model_path.stem]
    for ancestor in model_path.parents:
        if ancestor.stem == terminate_name:
            return lineage[::-1]
        lineage.append(ancestor.stem)
    return None
def find_last_exp_indx(exp_parent_path):
    """Return 1 + the highest 3-digit experiment index among the
    subdirectories of *exp_parent_path* (0 when none exist)."""
    next_indx = 0
    for entry in exp_parent_path.iterdir():
        # Only directories whose names start with three digits count.
        if entry.is_dir() and entry.stem[:3].isnumeric():
            next_indx = max(next_indx, int(entry.stem[:3]) + 1)
    return next_indx
def find_resume_exp(exp_parent_path, exp_pattern):
    """Locate the unique experiment directory whose name starts with
    *exp_pattern*; exit the process when zero or multiple candidates match."""
    candidates = sorted(exp_parent_path.glob(f'{exp_pattern}*'))
    if not candidates:
        print(f'No experiments could be found that satisfies the pattern = "*{exp_pattern}"')
        sys.exit(1)
    if len(candidates) > 1:
        print('More than one experiment found:')
        for candidate in candidates:
            print(candidate)
        sys.exit(1)
    exp_path = candidates[0]
    print(f'Continue with experiment "{exp_path}"')
    return exp_path
def update_config(cfg, args):
    """Copy CLI arguments into *cfg*, skipping keys the config already defines
    (checked in both lower- and upper-case form, so YAML-configured values win)."""
    for key, value in vars(args).items():
        already_defined = key.lower() in cfg or key.upper() in cfg
        if not already_defined:
            cfg[key] = value
def load_config(model_path):
    """Assemble the effective config for a model script.

    Starts from the model's sibling '<model_name>.yml' (if present), then
    walks upward from the model's directory toward the filesystem root,
    merging in each 'config.yml' found along the way; keys already set take
    precedence over ancestor values. The walk stops once the current working
    directory has been processed.
    """
    model_name = model_path.stem
    config_path = model_path.parent / (model_name + '.yml')
    if config_path.exists():
        cfg = load_config_file(config_path)
    else:
        cfg = dict()
    cwd = Path.cwd()
    config_parent = config_path.parent.absolute()
    while len(config_parent.parents) > 0:
        config_path = config_parent / 'config.yml'
        if config_path.exists():
            local_config = load_config_file(config_path, model_name=model_name)
            # More specific (deeper) configs win over ancestor configs.
            cfg.update({k: v for k, v in local_config.items() if k not in cfg})
        if config_parent.absolute() == cwd:
            break
        config_parent = config_parent.parent
    return edict(cfg)
def load_config_file(config_path, model_name=None, return_edict=False):
    """Read a YAML config file.

    If the file defines a SUBCONFIGS section and *model_name* has an entry
    there, that entry is merged over the top-level keys; the SUBCONFIGS key
    itself is always dropped. Returns a plain dict, or an EasyDict when
    *return_edict* is set.
    """
    with open(config_path, 'r') as stream:
        raw_cfg = yaml.safe_load(stream)
    if 'SUBCONFIGS' in raw_cfg:
        sub_cfgs = raw_cfg['SUBCONFIGS']
        if model_name is not None and model_name in sub_cfgs:
            raw_cfg.update(sub_cfgs[model_name])
        del raw_cfg['SUBCONFIGS']
    if return_edict:
        return edict(raw_cfg)
    return raw_cfg
| 4,641 | 27.832298 | 113 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/utils/misc.py | import torch
from .log import logger
def get_dims_with_exclusion(dim, exclude=None):
    """Return the list [0, dim); when *exclude* is given, remove it.

    Note: like the original, raises ValueError if *exclude* is not in range.
    """
    dims = list(range(dim))
    if exclude is None:
        return dims
    dims.remove(exclude)
    return dims
def save_checkpoint(net, checkpoints_path, epoch=None, prefix='', verbose=True, multi_gpu=False):
    """Serialize *net*'s state dict under *checkpoints_path*.

    The file is named '<epoch:03d>.pth', or 'last_checkpoint.pth' when
    *epoch* is None; *prefix* (if non-empty) is prepended with an underscore.
    Creates the directory if needed and unwraps the DataParallel ``.module``
    when *multi_gpu* is set.
    """
    name = 'last_checkpoint.pth' if epoch is None else f'{epoch:03d}.pth'
    if prefix:
        name = f'{prefix}_{name}'
    if not checkpoints_path.exists():
        checkpoints_path.mkdir(parents=True)
    target = checkpoints_path / name
    if verbose:
        logger.info(f'Save checkpoint to {str(target)}')
    state = net.module.state_dict() if multi_gpu else net.state_dict()
    torch.save(state, str(target))
def load_weights(model, path_to_weights, verbose=False):
    """Load a (possibly partial) state dict from *path_to_weights* into *model*.

    The checkpoint is merged into the model's current state dict before
    loading, so parameters missing from the checkpoint keep their values.
    """
    if verbose:
        logger.info(f'Load checkpoint from path: {path_to_weights}')
    merged_state = model.state_dict()
    merged_state.update(torch.load(str(path_to_weights), map_location='cpu'))
    model.load_state_dict(merged_state)
| 1,192 | 27.404762 | 97 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/utils/log.py | import io
import time
import logging
from datetime import datetime
import numpy as np
from torch.utils.tensorboard import SummaryWriter
# Shared project logger: a single logger writing to stderr at INFO level.
# File handlers are attached later via add_new_file_output_to_logger().
LOGGER_NAME = 'root'
LOGGER_DATEFMT = '%Y-%m-%d %H:%M:%S'
handler = logging.StreamHandler()
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
def add_new_file_output_to_logger(logs_path, prefix, only_message=False):
    """Attach a timestamped file handler under *logs_path* to the shared logger.

    The log file is named '<prefix><YYYY-mm-dd_HH-MM-SS>.log'. When
    *only_message* is set, records are written without level/timestamp.
    """
    log_name = prefix + datetime.strftime(datetime.today(), '%Y-%m-%d_%H-%M-%S') + '.log'
    logs_path.mkdir(exist_ok=True, parents=True)
    file_handler = logging.FileHandler(str(logs_path / log_name))
    record_fmt = '%(message)s' if only_message else '(%(levelname)s) %(asctime)s: %(message)s'
    file_handler.setFormatter(logging.Formatter(fmt=record_fmt, datefmt=LOGGER_DATEFMT))
    logger.addHandler(file_handler)
class TqdmToLogger(io.StringIO):
    """File-like sink that forwards tqdm's progress lines to a logger,
    rate-limited to at most one record every *mininterval* seconds."""

    logger = None
    level = None
    buf = ''

    def __init__(self, logger, level=None, mininterval=5):
        super(TqdmToLogger, self).__init__()
        self.logger = logger
        self.level = level or logging.INFO
        self.mininterval = mininterval
        self.last_time = 0

    def write(self, buf):
        # Keep only the latest status line, with tqdm's control chars stripped.
        self.buf = buf.strip('\r\n\t ')

    def flush(self):
        if self.buf and time.time() - self.last_time > self.mininterval:
            self.logger.log(self.level, self.buf)
            self.last_time = time.time()
class SummaryWriterAvg(SummaryWriter):
    """SummaryWriter that averages scalar values over *dump_period* calls per
    tag before emitting them, reducing tensorboard noise."""

    def __init__(self, *args, dump_period=20, **kwargs):
        super().__init__(*args, **kwargs)
        self._dump_period = dump_period
        # One running accumulator per scalar tag.
        self._avg_scalars = dict()

    def add_scalar(self, tag, value, global_step=None, disable_avg=False):
        # Composite values (or an explicit opt-out) bypass averaging entirely.
        if disable_avg or isinstance(value, (tuple, list, dict)):
            super().add_scalar(tag, np.array(value), global_step=global_step)
            return
        if tag not in self._avg_scalars:
            self._avg_scalars[tag] = ScalarAccumulator(self._dump_period)
        accumulator = self._avg_scalars[tag]
        accumulator.add(value)
        if accumulator.is_full():
            super().add_scalar(tag, accumulator.value, global_step=global_step)
            accumulator.reset()
class ScalarAccumulator(object):
    """Running mean over values added since the last reset, considered
    'full' once *period* values have been accumulated."""

    def __init__(self, period):
        self.sum = 0
        self.cnt = 0
        self.period = period

    def add(self, value):
        self.sum += value
        self.cnt += 1

    @property
    def value(self):
        # Mean of everything added since the last reset; 0 when empty.
        return self.sum / self.cnt if self.cnt > 0 else 0

    def reset(self):
        self.cnt = 0
        self.sum = 0

    def is_full(self):
        return self.cnt >= self.period

    def __len__(self):
        return self.cnt
| 2,809 | 27.1 | 89 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/data/base.py | import random
import numpy as np
import torch
class BaseHDataset(torch.utils.data.dataset.Dataset):
    """Base dataset for video harmonization samples.

    Subclasses implement `get_sample(index)` returning a dict with uint8
    'image', 'target_image', previous-frame 'image_pre' and binary masks
    'object_mask' / 'object_mask_pre'.

    Fixes: removed leftover debug `print` calls and commented-out code;
    `self.input_transform` was assigned twice (once before the None default
    was applied, making the first assignment dead).
    """

    def __init__(self,
                 augmentator=None,
                 input_transform=None,
                 keep_background_prob=0.0,
                 with_image_info=False,
                 epoch_len=-1):
        super(BaseHDataset, self).__init__()
        self.epoch_len = epoch_len
        self.augmentator = augmentator
        self.keep_background_prob = keep_background_prob
        self.with_image_info = with_image_info
        # Identity transform by default.
        if input_transform is None:
            input_transform = lambda x: x
        self.input_transform = input_transform
        self.dataset_samples = None

    def __getitem__(self, index):
        if self.epoch_len > 0:
            # Virtual epoch: draw a random sample regardless of `index`.
            index = random.randrange(0, len(self.dataset_samples))
        sample = self.get_sample(index)
        self.check_sample_types(sample)
        sample = self.augment_sample(sample)
        image = self.input_transform(sample['image'])
        target_image = self.input_transform(sample['target_image'])
        obj_mask = sample['object_mask'].astype(np.float32)
        image_pre = self.input_transform(sample['image_pre'])
        obj_mask_pre = sample['object_mask_pre'].astype(np.float32)
        output = {
            'images_pre': image_pre,
            'masks_pre': obj_mask_pre[np.newaxis, ...].astype(np.float32),
            'images': image,
            'masks': obj_mask[np.newaxis, ...].astype(np.float32),
            'target_images': target_image
        }
        if self.with_image_info and 'image_id' in sample:
            output['image_info'] = sample['image_id']
        return output

    def check_sample_types(self, sample):
        # Raw samples must arrive as 8-bit images.
        assert sample['image'].dtype == 'uint8'
        if 'target_image' in sample:
            assert sample['target_image'].dtype == 'uint8'

    def augment_sample(self, sample):
        if self.augmentator is None:
            return sample
        additional_targets = {target_name: sample[target_name]
                              for target_name in self.augmentator.additional_targets.keys()}
        # Re-draw augmentations until the foreground survives the transform.
        valid_augmentation = False
        while not valid_augmentation:
            aug_output = self.augmentator(image=sample['image'], **additional_targets)
            valid_augmentation = self.check_augmented_sample(sample, aug_output)
        for target_name, transformed_target in aug_output.items():
            sample[target_name] = transformed_target
        return sample

    def check_augmented_sample(self, sample, aug_output):
        # keep_background_prob < 0 disables the check entirely.
        if self.keep_background_prob < 0.0 or random.random() < self.keep_background_prob:
            return True
        return aug_output['object_mask'].sum() > 1.0

    def get_sample(self, index):
        raise NotImplementedError

    def __len__(self):
        if self.epoch_len > 0:
            return self.epoch_len
        else:
            return len(self.dataset_samples)
| 3,701 | 35.653465 | 153 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/losses.py | import torch
import torch.nn as nn
from iharm.utils import misc
class Loss(nn.Module):
    """Base loss: records which prediction / ground-truth outputs it consumes,
    so the trainer can wire up the correct tensors by name."""

    def __init__(self, pred_outputs, gt_outputs):
        super().__init__()
        self.pred_outputs, self.gt_outputs = pred_outputs, gt_outputs
class MSE(Loss):
    """Plain mean-squared error between prediction and target image."""
    def __init__(self, pred_name='images', gt_image_name='target_images'):
        super(MSE, self).__init__(pred_outputs=(pred_name,), gt_outputs=(gt_image_name,))
    def forward(self, pred, label):
        label = label.view(pred.size())
        # Average over all dims except batch -> per-sample loss vector.
        loss = torch.mean((pred - label) ** 2, dim=misc.get_dims_with_exclusion(label.dim(), 0))
        return loss
class MaskWeightedMSE(Loss):
    """Squared error summed over the image and normalized by the mask area
    (clamped to `min_area`), so small composited regions get a comparable
    per-pixel weight to large ones."""
    def __init__(self, min_area=1000.0, pred_name='images',
                 gt_image_name='target_images', gt_mask_name='masks'):
        super(MaskWeightedMSE, self).__init__(pred_outputs=(pred_name, ),
                                              gt_outputs=(gt_image_name, gt_mask_name))
        self.min_area = min_area
    def forward(self, pred, label, mask):
        label = label.view(pred.size())
        reduce_dims = misc.get_dims_with_exclusion(label.dim(), 0)
        loss = (pred - label) ** 2
        # Normalizer = #channels * mask area; clamping avoids a tiny-mask blow-up.
        delimeter = pred.size(1) * torch.clamp_min(torch.sum(mask, dim=reduce_dims), self.min_area)
        loss = torch.sum(loss, dim=reduce_dims) / delimeter
        return loss
| 1,347 | 32.7 | 99 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/metrics.py | import torch
import torch.nn.functional as F
class TrainMetric(object):
    """Base class for metrics tracked during training.

    Stores the most recent batch value plus a running epoch aggregate;
    subclasses implement `compute`.
    """

    def __init__(self, pred_outputs, gt_outputs, epsilon=1e-6):
        self.pred_outputs = pred_outputs
        self.gt_outputs = gt_outputs
        self.epsilon = epsilon
        self._last_batch_metric = 0.0
        self._epoch_metric_sum = 0.0
        self._epoch_batch_count = 0

    def compute(self, *args, **kwargs):
        raise NotImplementedError

    def update(self, *args, **kwargs):
        self._last_batch_metric = self.compute(*args, **kwargs)
        self._epoch_metric_sum += self._last_batch_metric
        self._epoch_batch_count += 1

    def get_epoch_value(self):
        # Mean over the batches seen this epoch; 0.0 before the first batch.
        if self._epoch_batch_count == 0:
            return 0.0
        return self._epoch_metric_sum / self._epoch_batch_count

    def reset_epoch_stats(self):
        self._epoch_metric_sum = 0.0
        self._epoch_batch_count = 0

    def log_states(self, sw, tag_prefix, global_step):
        # Log the most recent batch value to the summary writer.
        sw.add_scalar(tag=tag_prefix, value=self._last_batch_metric, global_step=global_step)

    @property
    def name(self):
        return type(self).__name__
class PSNRMetric(TrainMetric):
    """Peak signal-to-noise ratio between prediction and ground truth."""

    def __init__(self, pred_output='instances', gt_output='instances'):
        super(PSNRMetric, self).__init__((pred_output, ), (gt_output, ))

    def compute(self, pred, gt):
        # PSNR relative to the ground-truth peak; epsilon guards mse == 0.
        mse = F.mse_loss(pred, gt)
        peak_sq = gt.max() ** 2
        psnr = 10 * torch.log10(peak_sq / (mse + self.epsilon))
        return psnr.item()
class DenormalizedTrainMetric(TrainMetric):
    """TrainMetric whose inputs are normalized tensors; `denormalize` maps
    them back to the original value range before computing.

    Fix: removed the `update` override, which was a byte-identical
    duplicate of `TrainMetric.update` (inherited behavior is unchanged).
    """

    def __init__(self, pred_outputs, gt_outputs, mean=None, std=None):
        super(DenormalizedTrainMetric, self).__init__(pred_outputs, gt_outputs)
        # Identity denormalization (mean 0, std 1) unless provided.
        self.mean = torch.zeros(1) if mean is None else mean
        self.std = torch.ones(1) if std is None else std
        self.device = None

    def init_device(self, input_device):
        # Lazily move mean/std to the device of the first tensor seen.
        if self.device is None:
            self.device = input_device
            self.mean = self.mean.to(self.device)
            self.std = self.std.to(self.device)

    def denormalize(self, tensor):
        self.init_device(tensor.device)
        return tensor * self.std + self.mean
class DenormalizedPSNRMetric(DenormalizedTrainMetric):
    """PSNR computed on denormalized tensors; the prediction is clamped
    to the valid [0, 1] image range first."""

    def __init__(
        self,
        pred_output='instances', gt_output='instances',
        mean=None, std=None,
    ):
        super(DenormalizedPSNRMetric, self).__init__((pred_output, ), (gt_output, ), mean, std)

    def compute(self, pred, gt):
        pred_denorm = torch.clamp(self.denormalize(pred), 0, 1)
        gt_denorm = self.denormalize(gt)
        # Reuse PSNRMetric.compute as a plain function bound to this instance.
        return PSNRMetric.compute(self, pred_denorm, gt_denorm)
class DenormalizedMSEMetric(DenormalizedTrainMetric):
    """MSE computed on denormalized tensors scaled to the 8-bit [0, 255] range."""

    def __init__(
        self,
        pred_output='instances', gt_output='instances',
        mean=None, std=None,
    ):
        super(DenormalizedMSEMetric, self).__init__((pred_output, ), (gt_output, ), mean, std)

    def compute(self, pred, gt):
        pred_255 = self.denormalize(pred) * 255
        gt_255 = self.denormalize(gt) * 255
        return F.mse_loss(pred_255, gt_255).item()
| 3,379 | 32.8 | 95 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/ops.py | import torch
from torch import nn as nn
class SimpleInputFusion(nn.Module):
    """Fuses an extra channel (e.g. a binary mask) into an RGB image with
    two 1x1 convolutions, returning a tensor with `rgb_ch` channels."""

    def __init__(self, add_ch=1, rgb_ch=3, ch=8, norm_layer=nn.BatchNorm2d):
        super(SimpleInputFusion, self).__init__()
        self.fusion_conv = nn.Sequential(
            nn.Conv2d(in_channels=rgb_ch + add_ch, out_channels=ch, kernel_size=1),
            nn.LeakyReLU(negative_slope=0.2),
            norm_layer(ch),
            nn.Conv2d(in_channels=ch, out_channels=rgb_ch, kernel_size=1),
        )

    def forward(self, image, additional_input):
        fused = torch.cat((image, additional_input), dim=1)
        return self.fusion_conv(fused)
class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel attention combining global
    average and max pooling descriptors."""

    def __init__(self, in_channels):
        super(ChannelAttention, self).__init__()
        self.global_pools = nn.ModuleList([
            nn.AdaptiveAvgPool2d(1),
            nn.AdaptiveMaxPool2d(1),
        ])
        # Bottleneck width: in/16, but never narrower than 8.
        hidden = max(in_channels // 16, 8)
        self.attention_transform = nn.Sequential(
            nn.Linear(len(self.global_pools) * in_channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, in_channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        pooled = [pool(x) for pool in self.global_pools]
        descriptor = torch.cat(pooled, dim=1).flatten(start_dim=1)
        # Per-channel gates in (0, 1), broadcast over spatial dims.
        gates = self.attention_transform(descriptor)[..., None, None]
        return gates * x
class MaskedChannelAttention(nn.Module):
    """Channel attention driven by mask-separated max pooling (foreground and
    background pooled independently) plus global average pooling.

    Fix: the mask-resize condition compared `mask.shape[2:]` against
    `x.shape[:2]` — i.e. spatial dims vs (N, C) — which is (almost) always
    unequal and forced an interpolation on every call. It now compares the
    spatial dims of both tensors, as intended.
    """

    def __init__(self, in_channels, *args, **kwargs):
        super(MaskedChannelAttention, self).__init__()
        self.global_max_pool = MaskedGlobalMaxPool2d()
        self.global_avg_pool = FastGlobalAvgPool2d()
        # Bottleneck width: in/16, but never narrower than 8.
        intermediate_channels_count = max(in_channels // 16, 8)
        self.attention_transform = nn.Sequential(
            nn.Linear(3 * in_channels, intermediate_channels_count),
            nn.ReLU(inplace=True),
            nn.Linear(intermediate_channels_count, in_channels),
            nn.Sigmoid(),
        )

    def forward(self, x, mask):
        # Resize the mask only when its spatial size differs from x.
        if mask.shape[2:] != x.shape[2:]:
            mask = nn.functional.interpolate(
                mask, size=x.size()[-2:],
                mode='bilinear', align_corners=True
            )
        # 3 * C descriptor: masked fg-max, masked bg-max, global average.
        pooled_x = torch.cat([
            self.global_max_pool(x, mask),
            self.global_avg_pool(x)
        ], dim=1)
        channel_attention_weights = self.attention_transform(pooled_x)[..., None, None]
        return channel_attention_weights * x
class MaskedGlobalMaxPool2d(nn.Module):
    """Global max pooling applied separately to the masked (foreground) and
    complementary (background) regions, concatenated along channels."""

    def __init__(self):
        super().__init__()
        self.global_max_pool = FastGlobalMaxPool2d()

    def forward(self, x, mask):
        fg = self.global_max_pool(x * mask)
        bg = self.global_max_pool(x * (1.0 - mask))
        return torch.cat((fg, bg), dim=1)
class FastGlobalAvgPool2d(nn.Module):
    """Global average pooling via flatten + mean; returns an (N, C) tensor."""

    def __init__(self):
        super(FastGlobalAvgPool2d, self).__init__()

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        return x.view(batch, channels, -1).mean(dim=2)
class FastGlobalMaxPool2d(nn.Module):
    """Global max pooling via flatten + max; returns an (N, C) tensor."""

    def __init__(self):
        super(FastGlobalMaxPool2d, self).__init__()

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        return x.view(batch, channels, -1).max(dim=2)[0]
class ScaleLayer(nn.Module):
    """Learnable positive scalar multiplier.

    `lr_mult` rescales the parameter's effective learning rate by storing
    init_value / lr_mult internally and multiplying it back in `forward`.
    """

    def __init__(self, init_value=1.0, lr_mult=1):
        super().__init__()
        self.lr_mult = lr_mult
        self.scale = nn.Parameter(
            torch.full((1,), init_value / lr_mult, dtype=torch.float32)
        )

    def forward(self, x):
        # abs() keeps the effective scale non-negative during training.
        effective_scale = torch.abs(self.scale * self.lr_mult)
        return x * effective_scale
class FeaturesConnector(nn.Module):
    """Fuses backbone features into an encoder feature map.

    mode 'cat'  : channel concatenation;
    mode 'catc' : concatenation followed by a 1x1 conv back to out_channels;
    mode 'sum'  : 1x1 conv on the features, then elementwise sum;
    anything else (or feature_channels == 0): x passes through unchanged.
    """

    def __init__(self, mode, in_channels, feature_channels, out_channels):
        super(FeaturesConnector, self).__init__()
        # Disable fusion entirely when there are no backbone features.
        self.mode = mode if feature_channels else ''
        if self.mode == 'catc':
            self.reduce_conv = nn.Conv2d(in_channels + feature_channels, out_channels, kernel_size=1)
        elif self.mode == 'sum':
            self.reduce_conv = nn.Conv2d(feature_channels, out_channels, kernel_size=1)
        if self.mode == 'cat':
            self.output_channels = in_channels + feature_channels
        else:
            self.output_channels = out_channels

    def forward(self, x, features):
        if self.mode == 'cat':
            return torch.cat((x, features), 1)
        if self.mode == 'catc':
            return self.reduce_conv(torch.cat((x, features), 1))
        if self.mode == 'sum':
            return self.reduce_conv(features) + x
        return x

    def extra_repr(self):
        return self.mode
| 4,695 | 32.784173 | 101 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/initializer.py | import torch
import torch.nn as nn
import numpy as np
class Initializer(object):
    """Base weight initializer, applied via `model.apply(initializer)`.

    Normalization layers are dispatched to gamma/beta init; every other
    module with weight/bias goes through weight/bias init. With
    `local_init=True` each module is marked and initialized at most once.
    """
    def __init__(self, local_init=True, gamma=None):
        self.local_init = local_init
        self.gamma = gamma
    def __call__(self, m):
        # Skip modules that were already initialized.
        if getattr(m, '__initialized', False):
            return
        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
                          nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
                          nn.GroupNorm, nn.SyncBatchNorm)) or 'BatchNorm' in m.__class__.__name__:
            if m.weight is not None:
                self._init_gamma(m.weight.data)
            if m.bias is not None:
                self._init_beta(m.bias.data)
        else:
            if getattr(m, 'weight', None) is not None:
                self._init_weight(m.weight.data)
            if getattr(m, 'bias', None) is not None:
                self._init_bias(m.bias.data)
        if self.local_init:
            object.__setattr__(m, '__initialized', True)
    def _init_weight(self, data):
        nn.init.uniform_(data, -0.07, 0.07)
    def _init_bias(self, data):
        nn.init.constant_(data, 0)
    def _init_gamma(self, data):
        # Norm-layer scale: constant 1, or normal around 1 when gamma is set.
        if self.gamma is None:
            nn.init.constant_(data, 1.0)
        else:
            nn.init.normal_(data, 1.0, self.gamma)
    def _init_beta(self, data):
        nn.init.constant_(data, 0)
class Bilinear(Initializer):
    """Initializes (transposed) convolution weights with a bilinear
    upsampling kernel for the given integer `scale`."""
    def __init__(self, scale, groups, in_channels, **kwargs):
        super().__init__(**kwargs)
        self.scale = scale
        self.groups = groups
        self.in_channels = in_channels
    def _init_weight(self, data):
        """Reset the weight and bias."""
        bilinear_kernel = self.get_bilinear_kernel(self.scale)
        weight = torch.zeros_like(data)
        for i in range(self.in_channels):
            if self.groups == 1:
                j = i
            else:
                # Grouped deconv: each group has a single input channel slot.
                j = 0
            weight[i, j] = bilinear_kernel
        data[:] = weight
    @staticmethod
    def get_bilinear_kernel(scale):
        """Generate a bilinear upsampling kernel."""
        kernel_size = 2 * scale - scale % 2
        scale = (kernel_size + 1) // 2
        center = scale - 0.5 * (1 + kernel_size % 2)
        og = np.ogrid[:kernel_size, :kernel_size]
        kernel = (1 - np.abs(og[0] - center) / scale) * (1 - np.abs(og[1] - center) / scale)
        return torch.tensor(kernel, dtype=torch.float32)
class XavierGluon(Initializer):
    """Xavier/Glorot initializer mirroring mxnet/gluon's semantics:
    scale = sqrt(magnitude / factor) with factor from fan-in/fan-out."""
    def __init__(self, rnd_type='uniform', factor_type='avg', magnitude=3, **kwargs):
        super().__init__(**kwargs)
        self.rnd_type = rnd_type
        self.factor_type = factor_type
        self.magnitude = float(magnitude)
    def _init_weight(self, arr):
        fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(arr)
        # Choose the variance-scaling factor per the configured variant.
        if self.factor_type == 'avg':
            factor = (fan_in + fan_out) / 2.0
        elif self.factor_type == 'in':
            factor = fan_in
        elif self.factor_type == 'out':
            factor = fan_out
        else:
            raise ValueError('Incorrect factor type')
        scale = np.sqrt(self.magnitude / factor)
        if self.rnd_type == 'uniform':
            nn.init.uniform_(arr, -scale, scale)
        elif self.rnd_type == 'gaussian':
            nn.init.normal_(arr, 0, scale)
        else:
            raise ValueError('Unknown random type')
| 3,408 | 31.160377 | 98 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/syncbn/modules/nn/syncbn.py | """
/*****************************************************************************/
BatchNorm2dSync with multi-gpu
/*****************************************************************************/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
# python 3
from queue import Queue
except ImportError:
# python 2
from Queue import Queue
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from iharm.model.syncbn.modules.functional import batchnorm2d_sync
class _BatchNorm(nn.Module):
    """
    Customized BatchNorm from nn.BatchNorm
    >> added freeze attribute to enable bn freeze.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(_BatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        # When True, running stats are used even in training mode.
        self.freezed = False
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Identity running stats; random uniform gamma, zero beta.
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()
    def _check_input_dim(self, input):
        # Subclasses validate the expected input rank.
        return NotImplemented
    def forward(self, input):
        self._check_input_dim(input)
        # Batch stats are computed only when training, tracking is enabled,
        # and the layer is not frozen.
        compute_stats = not self.freezed and \
            self.training and self.track_running_stats
        ret = F.batch_norm(input, self.running_mean, self.running_var,
                           self.weight, self.bias, compute_stats,
                           self.momentum, self.eps)
        return ret
    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, '\
            'affine={affine}, ' \
            'track_running_stats={track_running_stats}'.format(
                **self.__dict__)
class BatchNorm2dNoSync(_BatchNorm):
    """
    Equivalent to nn.BatchNorm2d
    """
    def _check_input_dim(self, input):
        # 2d batch norm requires NCHW (rank-4) tensors.
        dims = input.dim()
        if dims != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(dims))
class BatchNorm2dSync(BatchNorm2dNoSync):
    """
    BatchNorm2d with automatic multi-GPU Sync
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(BatchNorm2dSync, self).__init__(
            num_features, eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats)
        self.sync_enabled = True
        self.devices = list(range(torch.cuda.device_count()))
        if len(self.devices) > 1:
            # Initialize queues
            # Device 0 is the master; each worker has its own queue to
            # receive the globally reduced statistics back from the master.
            self.worker_ids = self.devices[1:]
            self.master_queue = Queue(len(self.worker_ids))
            self.worker_queues = [Queue(1) for _ in self.worker_ids]
    def forward(self, x):
        # Cross-GPU sync is needed only when batch stats are computed and
        # more than one device participates.
        compute_stats = not self.freezed and \
            self.training and self.track_running_stats
        if self.sync_enabled and compute_stats and len(self.devices) > 1:
            if x.get_device() == self.devices[0]:
                # Master mode
                extra = {
                    "is_master": True,
                    "master_queue": self.master_queue,
                    "worker_queues": self.worker_queues,
                    "worker_ids": self.worker_ids
                }
            else:
                # Worker mode
                extra = {
                    "is_master": False,
                    "master_queue": self.master_queue,
                    "worker_queue": self.worker_queues[
                        self.worker_ids.index(x.get_device())]
                }
            return batchnorm2d_sync(x, self.weight, self.bias,
                                    self.running_mean, self.running_var,
                                    extra, compute_stats, self.momentum,
                                    self.eps)
        # Single-device / eval / frozen path: plain batch norm.
        return super(BatchNorm2dSync, self).forward(x)
    def __repr__(self):
        """repr"""
        rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \
            'affine={affine}, ' \
            'track_running_stats={track_running_stats},' \
            'devices={devices})'
        return rep.format(name=self.__class__.__name__, **self.__dict__)
#BatchNorm2d = BatchNorm2dNoSync
BatchNorm2d = BatchNorm2dSync
| 5,187 | 33.818792 | 79 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/syncbn/modules/functional/syncbn.py | """
/*****************************************************************************/
BatchNorm2dSync with multi-gpu
code referenced from : https://github.com/mapillary/inplace_abn
/*****************************************************************************/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.cuda.comm as comm
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from ._csrc import _backend
def _count_samples(x):
count = 1
for i, s in enumerate(x.size()):
if i != 1:
count *= s
return count
class BatchNorm2dSyncFunc(Function):
    """Autograd function implementing cross-GPU synchronized batch norm.

    The master device gathers partial sums from workers through queues,
    reduces them to global statistics, and broadcasts the result back;
    the heavy per-element work is delegated to the compiled `_backend`.
    """
    @staticmethod
    def forward(ctx, x, weight, bias, running_mean, running_var,
                extra, compute_stats=True, momentum=0.1, eps=1e-05):
        def _parse_extra(ctx, extra):
            # Stash the queue plumbing on ctx so backward can reuse it.
            ctx.is_master = extra["is_master"]
            if ctx.is_master:
                ctx.master_queue = extra["master_queue"]
                ctx.worker_queues = extra["worker_queues"]
                ctx.worker_ids = extra["worker_ids"]
            else:
                ctx.master_queue = extra["master_queue"]
                ctx.worker_queue = extra["worker_queue"]
        # Save context
        if extra is not None:
            _parse_extra(ctx, extra)
        ctx.compute_stats = compute_stats
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.affine = weight is not None and bias is not None
        if ctx.compute_stats:
            # Total sample count across all participating devices.
            N = _count_samples(x) * (ctx.master_queue.maxsize + 1)
            assert N > 1
            # 1. compute sum(x) and sum(x^2)
            xsum, xsqsum = _backend.syncbn_sum_sqsum(x.detach())
            if ctx.is_master:
                xsums, xsqsums = [xsum], [xsqsum]
                # master : gatther all sum(x) and sum(x^2) from slaves
                for _ in range(ctx.master_queue.maxsize):
                    xsum_w, xsqsum_w = ctx.master_queue.get()
                    ctx.master_queue.task_done()
                    xsums.append(xsum_w)
                    xsqsums.append(xsqsum_w)
                xsum = comm.reduce_add(xsums)
                xsqsum = comm.reduce_add(xsqsums)
                mean = xsum / N
                sumvar = xsqsum - xsum * mean
                # var: biased (1/N) for normalization; uvar: unbiased
                # (1/(N-1)) for the running-variance update.
                var = sumvar / N
                uvar = sumvar / (N - 1)
                # master : broadcast global mean, variance to all slaves
                tensors = comm.broadcast_coalesced(
                    (mean, uvar, var), [mean.get_device()] + ctx.worker_ids)
                for ts, queue in zip(tensors[1:], ctx.worker_queues):
                    queue.put(ts)
            else:
                # slave : send sum(x) and sum(x^2) to master
                ctx.master_queue.put((xsum, xsqsum))
                # slave : get global mean and variance
                mean, uvar, var = ctx.worker_queue.get()
                ctx.worker_queue.task_done()
            # Update running stats
            running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)
            running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * uvar)
            ctx.N = N
            ctx.save_for_backward(x, weight, bias, mean, var)
        else:
            # Inference: normalize with the stored running statistics.
            mean, var = running_mean, running_var
        # do batch norm forward
        z = _backend.syncbn_forward(x, weight, bias, mean, var,
                                    ctx.affine, ctx.eps)
        return z
    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        x, weight, bias, mean, var = ctx.saved_tensors
        dz = dz.contiguous()
        # 1. compute \sum(\frac{dJ}{dy_i}) and \sum(\frac{dJ}{dy_i}*\hat{x_i})
        sum_dz, sum_dz_xhat = _backend.syncbn_backward_xhat(
            dz, x, mean, var, ctx.eps)
        if ctx.is_master:
            sum_dzs, sum_dz_xhats = [sum_dz], [sum_dz_xhat]
            # master : gatther from slaves
            for _ in range(ctx.master_queue.maxsize):
                sum_dz_w, sum_dz_xhat_w = ctx.master_queue.get()
                ctx.master_queue.task_done()
                sum_dzs.append(sum_dz_w)
                sum_dz_xhats.append(sum_dz_xhat_w)
            # master : compute global stats
            sum_dz = comm.reduce_add(sum_dzs)
            sum_dz_xhat = comm.reduce_add(sum_dz_xhats)
            sum_dz /= ctx.N
            sum_dz_xhat /= ctx.N
            # master : broadcast global stats
            tensors = comm.broadcast_coalesced(
                (sum_dz, sum_dz_xhat), [mean.get_device()] + ctx.worker_ids)
            for ts, queue in zip(tensors[1:], ctx.worker_queues):
                queue.put(ts)
        else:
            # slave : send to master
            ctx.master_queue.put((sum_dz, sum_dz_xhat))
            # slave : get global stats
            sum_dz, sum_dz_xhat = ctx.worker_queue.get()
            ctx.worker_queue.task_done()
        # do batch norm backward
        dx, dweight, dbias = _backend.syncbn_backward(
            dz, x, weight, bias, mean, var, sum_dz, sum_dz_xhat,
            ctx.affine, ctx.eps)
        # One gradient slot (or None) per forward argument.
        return dx, dweight, dbias, \
            None, None, None, None, None, None
batchnorm2d_sync = BatchNorm2dSyncFunc.apply
__all__ = ["batchnorm2d_sync"]
| 5,291 | 37.347826 | 79 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/syncbn/modules/functional/_csrc.py | """
/*****************************************************************************/
Extension module loader
code referenced from : https://github.com/facebookresearch/maskrcnn-benchmark
/*****************************************************************************/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import torch
try:
from torch.utils.cpp_extension import load
from torch.utils.cpp_extension import CUDA_HOME
except ImportError:
raise ImportError(
"The cpp layer extensions requires PyTorch 0.4 or higher")
def _load_C_extensions():
    """JIT-compile and load the syncbn C/CUDA extension with
    torch.utils.cpp_extension.load.

    CUDA sources and flags are included only when both a CUDA device and
    the CUDA toolchain (CUDA_HOME) are available.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    this_dir = os.path.join(this_dir, "csrc")
    main_file = glob.glob(os.path.join(this_dir, "*.cpp"))
    sources_cpu = glob.glob(os.path.join(this_dir, "cpu", "*.cpp"))
    sources_cuda = glob.glob(os.path.join(this_dir, "cuda", "*.cu"))
    sources = main_file + sources_cpu
    extra_cflags = []
    extra_cuda_cflags = []
    if torch.cuda.is_available() and CUDA_HOME is not None:
        sources.extend(sources_cuda)
        extra_cflags = ["-O3", "-DWITH_CUDA"]
        extra_cuda_cflags = ["--expt-extended-lambda"]
    sources = [os.path.join(this_dir, s) for s in sources]
    extra_include_paths = [this_dir]
    return load(
        name="ext_lib",
        sources=sources,
        extra_cflags=extra_cflags,
        extra_include_paths=extra_include_paths,
        extra_cuda_cflags=extra_cuda_cflags,
    )
_backend = _load_C_extensions()
| 1,586 | 27.854545 | 79 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/backboned/hrnet.py | import torch.nn as nn
from iharm.model.modeling.hrnet_ocr import HighResolutionNet
from iharm.model.backboned.ih_model import IHModelWithBackbone
from iharm.model.modifiers import LRMult
from iharm.model.modeling.basic_blocks import MaxPoolDownSize
class HRNetIHModel(IHModelWithBackbone):
    def __init__(
        self,
        base_config,
        downsize_hrnet_input=False, mask_fusion='sum',
        lr_mult=0.1, cat_hrnet_outputs=True, pyramid_channels=-1,
        ocr=64, width=18, small=True,
        mode='cat',
        **base_kwargs
    ):
        """
        Creates image harmonization model supported by the features extracted from the pre-trained HRNet backbone.
        HRNet outputs feature maps on 4 different resolutions.
        Parameters
        ----------
        base_config : dict
            Configuration dict for the base model, to which the backbone features are incorporated.
            base_config contains model class and init parameters, examples can be found in iharm.mconfigs.base_models
        downsize_hrnet_input : bool
            If the input image should be half-sized for the backbone.
        mask_fusion : str
            How to fuse the binary mask with the backbone input:
            'sum': apply convolution to the mask and sum it with the output of the first convolution in the backbone
            'rgb': concatenate the mask to the input image and translate it back to 3 channels with convolution
            otherwise: do not fuse mask with the backbone input
        lr_mult : float
            Multiply learning rate to lr_mult when updating the weights of the backbone.
        cat_hrnet_outputs : bool
            If 4 HRNet outputs should be resized and concatenated to a single tensor.
        pyramid_channels : int
            When HRNet outputs are concatenated to a single one, it can be consequently downsized
            to produce a feature pyramid.
            The pyramid features are then fused with the encoder outputs in the base model on multiple layers.
            Each pyramid feature map contains equal number of channels equal to pyramid_channels.
            If pyramid_channels <= 0, the feature pyramid is not constructed.
        ocr : int
            When HRNet outputs are concatenated to a single one, the OCR module can be applied
            resulting in feature map with (2 * ocr) channels. If ocr <= 0 the OCR module is not applied.
        width : int
            Width of the HRNet blocks.
        small : bool
            If True, HRNet contains 2 blocks at each stage and 4 otherwise.
        mode : str
            How to fuse the backbone features with the encoder outputs in the base model:
            'sum': apply convolution to the backbone feature map obtaining number of channels
            same as in the encoder output and sum them
            'cat': concatenate the backbone feature map with the encoder output
            'catc': concatenate the backbone feature map with the encoder output and apply convolution obtaining
            number of channels same as in the encoder output
            otherwise: the backbone features are not incorporated into the base model
        base_kwargs : dict
            any kwargs associated with the base model
        """
        params = base_config['params']
        params.update(base_kwargs)
        depth = params['depth']
        # Pyramid depth shrinks by one when the HRNet input is half-sized.
        backbone = HRNetBB(
            cat_outputs=cat_hrnet_outputs,
            pyramid_channels=pyramid_channels,
            pyramid_depth=min(depth - 2 if not downsize_hrnet_input else depth - 3, 4),
            width=width, ocr=ocr, small=small,
            lr_mult=lr_mult,
        )
        # Tell the base model where and how to inject the backbone features.
        params.update(dict(
            backbone_from=3 if downsize_hrnet_input else 2,
            backbone_channels=backbone.output_channels,
            backbone_mode=mode
        ))
        base_model = base_config['model'](**params)
        super(HRNetIHModel, self).__init__(base_model, backbone, downsize_hrnet_input, mask_fusion)
class HRNetBB(nn.Module):
    """HRNet backbone wrapper that optionally concatenates the 4-resolution
    outputs, applies the OCR head, and/or builds a max-pool feature pyramid."""
    def __init__(
        self,
        cat_outputs=True,
        pyramid_channels=256, pyramid_depth=4,
        width=18, ocr=64, small=True,
        lr_mult=0.1,
    ):
        super(HRNetBB, self).__init__()
        self.cat_outputs = cat_outputs
        # OCR and pyramid heads only apply to the concatenated output.
        self.ocr_on = ocr > 0 and cat_outputs
        self.pyramid_on = pyramid_channels > 0 and cat_outputs
        self.hrnet = HighResolutionNet(width, 2, ocr_width=ocr, small=small)
        self.hrnet.apply(LRMult(lr_mult))
        if self.ocr_on:
            # OCR heads are trained from scratch -> keep the full learning rate.
            self.hrnet.ocr_distri_head.apply(LRMult(1.0))
            self.hrnet.ocr_gather_head.apply(LRMult(1.0))
            self.hrnet.conv3x3_ocr.apply(LRMult(1.0))
        # Channel counts of the 4 HRNet branches: width, 2w, 4w, 8w.
        hrnet_cat_channels = [width * 2 ** i for i in range(4)]
        if self.pyramid_on:
            self.output_channels = [pyramid_channels] * 4
        elif self.ocr_on:
            self.output_channels = [ocr * 2]
        elif self.cat_outputs:
            self.output_channels = [sum(hrnet_cat_channels)]
        else:
            self.output_channels = hrnet_cat_channels
        if self.pyramid_on:
            downsize_in_channels = ocr * 2 if self.ocr_on else sum(hrnet_cat_channels)
            self.downsize = MaxPoolDownSize(downsize_in_channels, pyramid_channels, pyramid_channels, pyramid_depth)
    def forward(self, image, mask, mask_features):
        if not self.cat_outputs:
            # Raw 4-resolution HRNet features, no OCR / pyramid processing.
            return self.hrnet.compute_hrnet_feats(image, mask_features, return_list=True)
        outputs = list(self.hrnet(image, mask, mask_features))
        if self.pyramid_on:
            outputs = self.downsize(outputs[0])
        return outputs
    def load_pretrained_weights(self, pretrained_path):
        self.hrnet.load_pretrained_weights(pretrained_path)
| 5,787 | 43.523077 | 117 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/backboned/ih_model.py | import torch
import torch.nn as nn
from iharm.model.ops import SimpleInputFusion, ScaleLayer
class IHModelWithBackbone(nn.Module):
    def __init__(
        self,
        model, backbone,
        downsize_backbone_input=False,
        mask_fusion='sum',
        backbone_conv1_channels=64,
    ):
        """
        Creates image harmonization model supported by the features extracted from the pre-trained backbone.
        Parameters
        ----------
        model : nn.Module
            Image harmonization model takes image and mask as an input and handles features from the backbone network.
        backbone : nn.Module
            Backbone model accepts RGB image and returns a list of features.
        downsize_backbone_input : bool
            If the input image should be half-sized for the backbone.
        mask_fusion : str
            How to fuse the binary mask with the backbone input:
            'sum': apply convolution to the mask and sum it with the output of the first convolution in the backbone
            'rgb': concatenate the mask to the input image and translate it back to 3 channels with convolution
            otherwise: do not fuse mask with the backbone input
        backbone_conv1_channels : int
            If mask_fusion is 'sum', define the number of channels for the convolution applied to the mask.
        """
        super(IHModelWithBackbone, self).__init__()
        self.downsize_backbone_input = downsize_backbone_input
        self.mask_fusion = mask_fusion
        self.backbone = backbone
        self.model = model
        if mask_fusion == 'rgb':
            self.fusion = SimpleInputFusion()
        elif mask_fusion == 'sum':
            # Project the 1-channel mask to the backbone stem width,
            # scaled down so it starts as a small perturbation.
            self.mask_conv = nn.Sequential(
                nn.Conv2d(1, backbone_conv1_channels, kernel_size=3, stride=2, padding=1, bias=True),
                ScaleLayer(init_value=0.1, lr_mult=1)
            )
    def forward(self, image, mask):
        """
        Forward the backbone model and then the base model, supported by the backbone feature maps.
        Return model predictions.
        Parameters
        ----------
        image : torch.Tensor
            Input RGB image.
        mask : torch.Tensor
            Binary mask of the foreground region.
        Returns
        -------
        torch.Tensor
            Harmonized RGB image.
        """
        backbone_image = image
        # Two-channel mask: foreground and its complement.
        backbone_mask = torch.cat((mask, 1.0 - mask), dim=1)
        if self.downsize_backbone_input:
            backbone_image = nn.functional.interpolate(
                backbone_image, scale_factor=0.5,
                mode='bilinear', align_corners=True
            )
            backbone_mask = nn.functional.interpolate(
                backbone_mask, backbone_image.size()[2:],
                mode='bilinear', align_corners=True
            )
        backbone_image = (
            self.fusion(backbone_image, backbone_mask[:, :1])
            if self.mask_fusion == 'rgb' else
            backbone_image
        )
        backbone_mask_features = self.mask_conv(backbone_mask[:, :1]) if self.mask_fusion == 'sum' else None
        backbone_features = self.backbone(backbone_image, backbone_mask, backbone_mask_features)
        # The base model consumes the full-resolution inputs plus features.
        output = self.model(image, mask, backbone_features)
        return output
| 3,309 | 37.045977 | 118 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/backboned/deeplab.py | from torch import nn as nn
from iharm.model.modeling.deeplab_v3 import DeepLabV3Plus
from iharm.model.backboned.ih_model import IHModelWithBackbone
from iharm.model.modifiers import LRMult
from iharm.model.modeling.basic_blocks import MaxPoolDownSize
class DeepLabIHModel(IHModelWithBackbone):
    def __init__(
        self,
        base_config,
        mask_fusion='sum',
        deeplab_backbone='resnet34',
        lr_mult=0.1,
        pyramid_channels=-1, deeplab_ch=256,
        mode='cat',
        **base_kwargs
    ):
        """
        Build a harmonization model whose encoder is assisted by features
        from a pre-trained DeepLab backbone.

        Parameters
        ----------
        base_config : dict
            Holds the base model class under 'model' and its constructor
            arguments under 'params' (see iharm.mconfigs.base_models).
        mask_fusion : str
            How the binary mask enters the backbone: 'sum' adds a convolved
            mask to the first backbone convolution, 'rgb' blends the mask
            into the RGB input, anything else leaves the input untouched.
        deeplab_backbone : str
            Name of the ResNet body used inside DeepLab.
        lr_mult : float
            Learning-rate multiplier applied to the backbone weights.
        pyramid_channels : int
            If positive, the DeepLab output is turned into a feature pyramid
            where every level has this many channels; otherwise no pyramid
            is built.
        deeplab_ch : int
            Channel width of the DeepLab output (and some inner layers).
        mode : str
            How backbone features are fused with encoder outputs in the base
            model: 'sum', 'cat' or 'catc'; any other value disables fusion.
        base_kwargs : dict
            Extra keyword arguments forwarded to the base model.
        """
        params = base_config['params']
        params.update(base_kwargs)

        # Deep base models (depth > 7) work on a downsized backbone input
        # and hook the backbone one encoder level later.
        small_input = params['depth'] > 7
        backbone = DeepLabBB(pyramid_channels, deeplab_ch, deeplab_backbone, lr_mult)
        params.update(
            backbone_from=3 if small_input else 2,
            backbone_channels=backbone.output_channels,
            backbone_mode=mode,
        )
        base_model = base_config['model'](**params)

        super(DeepLabIHModel, self).__init__(base_model, backbone, small_input, mask_fusion)
class DeepLabBB(nn.Module):
    """DeepLabV3+ feature extractor, optionally followed by a max-pool pyramid."""

    def __init__(
        self,
        pyramid_channels=256,
        deeplab_ch=256,
        backbone='resnet34',
        backbone_lr_mult=0.1,
    ):
        super(DeepLabBB, self).__init__()

        self.pyramid_on = pyramid_channels > 0
        # Either four pyramid levels of equal width or a single DeepLab map.
        self.output_channels = (
            [pyramid_channels] * 4 if self.pyramid_on else [deeplab_ch]
        )

        self.deeplab = DeepLabV3Plus(
            backbone=backbone,
            ch=deeplab_ch,
            project_dropout=0.2,
            norm_layer=nn.BatchNorm2d,
            backbone_norm_layer=nn.BatchNorm2d,
        )
        # The ResNet body is pre-trained, so it is updated with a reduced LR.
        self.deeplab.backbone.apply(LRMult(backbone_lr_mult))

        if self.pyramid_on:
            self.downsize = MaxPoolDownSize(deeplab_ch, pyramid_channels, pyramid_channels, 4)

    def forward(self, image, mask, mask_features):
        outputs = list(self.deeplab(image, mask_features))
        if self.pyramid_on:
            outputs = self.downsize(outputs[0])
        return outputs

    def load_pretrained_weights(self):
        self.deeplab.load_pretrained_weights()
| 4,474 | 41.619048 | 117 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/base/dih_model.py | import torch
import torch.nn as nn
from iharm.model.modeling.conv_autoencoder import ConvEncoder, DeconvDecoder
class DeepImageHarmonization(nn.Module):
    """Plain convolutional encoder-decoder for deep image harmonization."""

    def __init__(
        self,
        depth,
        norm_layer=nn.BatchNorm2d, batchnorm_from=0,
        attend_from=-1,
        image_fusion=False,
        ch=64, max_channels=512,
        backbone_from=-1, backbone_channels=None, backbone_mode=''
    ):
        super(DeepImageHarmonization, self).__init__()
        self.depth = depth
        self.encoder = ConvEncoder(
            depth, ch,
            norm_layer, batchnorm_from, max_channels,
            backbone_from, backbone_channels, backbone_mode
        )
        self.decoder = DeconvDecoder(depth, self.encoder.blocks_channels, norm_layer, attend_from, image_fusion)

    def forward(self, image, mask, backbone_features=None):
        # The mask rides along as an extra input channel.
        encoder_input = torch.cat((image, mask), dim=1)
        skips = self.encoder(encoder_input, backbone_features)
        harmonized = self.decoder(skips, image, mask)
        return {'images': harmonized}
| 1,049 | 32.870968 | 112 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/base/iseunet_v1.py | import torch
import torch.nn as nn
from iharm.model.modeling.unet import UNetEncoder, UNetDecoder
from iharm.model.ops import MaskedChannelAttention
class ISEUNetV1(nn.Module):
    """U-Net harmonization model with masked channel attention in the decoder."""

    def __init__(
        self,
        depth,
        norm_layer=nn.BatchNorm2d, batchnorm_from=2,
        attend_from=3,
        image_fusion=False,
        ch=64, max_channels=512,
        backbone_from=-1, backbone_channels=None, backbone_mode=''
    ):
        super(ISEUNetV1, self).__init__()
        self.depth = depth
        self.encoder = UNetEncoder(
            depth, ch,
            norm_layer, batchnorm_from, max_channels,
            backbone_from, backbone_channels, backbone_mode
        )
        self.decoder = UNetDecoder(
            depth, self.encoder.block_channels,
            norm_layer,
            attention_layer=MaskedChannelAttention,
            attend_from=attend_from,
            image_fusion=image_fusion
        )

    def forward(self, image, mask, backbone_features=None):
        # The mask is concatenated to the image as a fourth input channel.
        encoder_input = torch.cat((image, mask), dim=1)
        skips = self.encoder(encoder_input, backbone_features)
        return {'images': self.decoder(skips, image, mask)}
| 1,191 | 30.368421 | 66 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/base/ssam_model.py | import torch
from functools import partial
from torch import nn as nn
from iharm.model.modeling.basic_blocks import ConvBlock, GaussianSmoothing
from iharm.model.modeling.unet import UNetEncoder, UNetDecoder
from iharm.model.ops import ChannelAttention
class SSAMImageHarmonization(nn.Module):
    """U-Net harmonization model using spatial-separated attention in the decoder."""

    def __init__(
        self,
        depth,
        norm_layer=nn.BatchNorm2d, batchnorm_from=2,
        attend_from=3, attention_mid_k=2.0,
        image_fusion=False,
        ch=64, max_channels=512,
        backbone_from=-1, backbone_channels=None, backbone_mode=''
    ):
        super(SSAMImageHarmonization, self).__init__()
        self.depth = depth
        self.encoder = UNetEncoder(
            depth, ch,
            norm_layer, batchnorm_from, max_channels,
            backbone_from, backbone_channels, backbone_mode
        )
        self.decoder = UNetDecoder(
            depth, self.encoder.block_channels,
            norm_layer,
            attention_layer=partial(SpatialSeparatedAttention, mid_k=attention_mid_k),
            attend_from=attend_from,
            image_fusion=image_fusion
        )

    def forward(self, image, mask, backbone_features=None):
        # The mask is concatenated to the image as a fourth input channel.
        encoder_input = torch.cat((image, mask), dim=1)
        skips = self.encoder(encoder_input, backbone_features)
        return {'images': self.decoder(skips, image, mask)}
class SpatialSeparatedAttention(nn.Module):
    """
    Gate foreground and background features with separate channel attentions
    and blend them with a Gaussian-blurred version of the foreground mask.
    """

    def __init__(self, in_channels, norm_layer, activation, mid_k=2.0):
        super(SpatialSeparatedAttention, self).__init__()
        self.background_gate = ChannelAttention(in_channels)
        self.foreground_gate = ChannelAttention(in_channels)
        self.mix_gate = ChannelAttention(in_channels)

        mid_channels = int(mid_k * in_channels)
        conv_params = dict(
            kernel_size=3, stride=1, padding=1,
            norm_layer=norm_layer, activation=activation,
            bias=False,
        )
        # Extra learnable transform applied to the foreground path only.
        self.learning_block = nn.Sequential(
            ConvBlock(in_channels, mid_channels, **conv_params),
            ConvBlock(mid_channels, in_channels, **conv_params),
        )
        self.mask_blurring = GaussianSmoothing(1, 7, 1, padding=3)

    def forward(self, x, mask):
        # Resize the mask to the feature resolution and soften its edges.
        resized_mask = nn.functional.interpolate(
            mask, size=x.size()[-2:],
            mode='bilinear', align_corners=True
        )
        soft_mask = self.mask_blurring(resized_mask)

        background = self.background_gate(x)
        foreground = self.learning_block(self.foreground_gate(x))
        mix = self.mix_gate(x)
        return soft_mask * (foreground + mix) + (1 - soft_mask) * background
| 2,771 | 35 | 86 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/basic_blocks.py | import math
import numbers
import torch
import torch.nn.functional as F
from torch import nn as nn
class ConvHead(nn.Module):
    """Stack of conv + ReLU (+ norm) layers followed by a 1x1 projection."""

    def __init__(self, out_channels, in_channels=32, num_layers=1,
                 kernel_size=3, padding=1,
                 norm_layer=nn.BatchNorm2d):
        super(ConvHead, self).__init__()
        layers = []
        for _ in range(num_layers):
            layers.append(nn.Conv2d(in_channels, in_channels, kernel_size, padding=padding))
            layers.append(nn.ReLU())
            layers.append(norm_layer(in_channels) if norm_layer is not None else nn.Identity())
        # Final 1x1 conv maps to the requested number of output channels.
        layers.append(nn.Conv2d(in_channels, out_channels, 1, padding=0))
        self.convhead = nn.Sequential(*layers)

    def forward(self, *inputs):
        return self.convhead(inputs[0])
class SepConvHead(nn.Module):
    """Separable-conv tower ending in a 1x1 conv producing num_outputs maps."""

    def __init__(self, num_outputs, in_channels, mid_channels, num_layers=1,
                 kernel_size=3, padding=1, dropout_ratio=0.0, dropout_indx=0,
                 norm_layer=nn.BatchNorm2d):
        super(SepConvHead, self).__init__()

        layers = []
        for layer_i in range(num_layers):
            layers.append(SeparableConv2d(
                in_channels=in_channels if layer_i == 0 else mid_channels,
                out_channels=mid_channels,
                dw_kernel=kernel_size, dw_padding=padding,
                norm_layer=norm_layer, activation='relu',
            ))
            # Optionally insert dropout after the selected layer.
            if dropout_ratio > 0 and dropout_indx == layer_i:
                layers.append(nn.Dropout(dropout_ratio))
        layers.append(nn.Conv2d(in_channels=mid_channels, out_channels=num_outputs,
                                kernel_size=1, padding=0))

        self.layers = nn.Sequential(*layers)

    def forward(self, *inputs):
        return self.layers(inputs[0])
def select_activation_function(activation):
    """
    Resolve an activation specification to something callers can instantiate.

    Parameters
    ----------
    activation : str or type or nn.Module
        Either an activation name ('relu' or 'softplus', case-insensitive)
        or an nn.Module subclass (e.g. nn.ELU), which is returned unchanged.

    Returns
    -------
    type
        Activation class; callers instantiate it with no arguments.

    Raises
    ------
    ValueError
        If the name is unknown or the argument is not a string or an
        nn.Module subclass/instance.
    """
    if isinstance(activation, str):
        if activation.lower() == 'relu':
            return nn.ReLU
        elif activation.lower() == 'softplus':
            return nn.Softplus
        else:
            raise ValueError(f"Unknown activation type {activation}")
    elif isinstance(activation, nn.Module):
        return activation
    # Bug fix: the original only checked isinstance(activation, nn.Module),
    # which is False for classes such as nn.ELU, so every class argument was
    # rejected with ValueError even though callers do `_activation()` and
    # therefore expect a class back.  Accept nn.Module subclasses explicitly.
    elif isinstance(activation, type) and issubclass(activation, nn.Module):
        return activation
    else:
        raise ValueError(f"Unknown activation type {activation}")
class SeparableConv2d(nn.Module):
    """Depthwise conv followed by a pointwise 1x1 conv, with optional norm and activation."""

    def __init__(self, in_channels, out_channels, dw_kernel, dw_padding, dw_stride=1,
                 activation=None, use_bias=False, norm_layer=None):
        super(SeparableConv2d, self).__init__()
        activation_cls = select_activation_function(activation)
        # groups=in_channels makes the first conv depthwise.
        depthwise = nn.Conv2d(in_channels, in_channels, kernel_size=dw_kernel, stride=dw_stride,
                              padding=dw_padding, bias=use_bias, groups=in_channels)
        pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=use_bias)
        self.body = nn.Sequential(
            depthwise,
            pointwise,
            norm_layer(out_channels) if norm_layer is not None else nn.Identity(),
            activation_cls()
        )

    def forward(self, x):
        return self.body(x)
class ConvBlock(nn.Module):
    """Conv2d -> (norm) -> activation, defaulting to a stride-2 downsampling block."""

    def __init__(
        self,
        in_channels, out_channels,
        kernel_size=4, stride=2, padding=1,
        norm_layer=nn.BatchNorm2d, activation=nn.ELU,
        bias=True,
    ):
        super(ConvBlock, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels,
                         kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
        norm = norm_layer(out_channels) if norm_layer is not None else nn.Identity()
        self.block = nn.Sequential(conv, norm, activation())

    def forward(self, x):
        return self.block(x)
class GaussianSmoothing(nn.Module):
    """
    https://discuss.pytorch.org/t/is-there-anyway-to-do-gaussian-filtering-for-an-image-2d-3d-in-pytorch/12351/10
    Apply gaussian smoothing on a tensor (1d, 2d, 3d).
    Filtering is performed seperately for each channel in the input using a depthwise convolution.
    Arguments:
        channels (int, sequence): Number of channels of the input tensors.
            Output will have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        padding (int, optional): Zero-padding applied by the convolution. Default 0.
        dim (int, optional): The number of dimensions of the data. Default value is 2 (spatial).
    """
    def __init__(self, channels, kernel_size, sigma, padding=0, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Scalars are broadcast to one value per spatial dimension.
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # The gaussian kernel is the product of the gaussian function of each dimension.
        kernel = 1.
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for size, std, grid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2.
            kernel *= torch.exp(-((grid - mean) / std) ** 2 / 2) / (std * (2 * math.pi) ** 0.5)
        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)
        # Reshape to depthwise convolutional weight.
        kernel = kernel.view(1, 1, *kernel.size())
        # Same kernel replicated once per channel (depthwise filtering).
        kernel = torch.repeat_interleave(kernel, channels, 0)
        # Buffer (not Parameter): moves with .to()/.cuda() but is never trained.
        self.register_buffer('weight', kernel)
        self.groups = channels
        self.padding = padding
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))
    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        # groups=channels keeps the filtering independent per channel.
        return self.conv(input, weight=self.weight, padding=self.padding, groups=self.groups)
class MaxPoolDownSize(nn.Module):
    """Produce a pyramid of `depth` feature maps, each level half the size of the previous."""

    def __init__(self, in_channels, mid_channels, out_channels, depth):
        super(MaxPoolDownSize, self).__init__()
        self.depth = depth
        # 1x1 conv reduces the input channel count once, before the pyramid.
        self.reduce_conv = ConvBlock(in_channels, mid_channels, kernel_size=1, stride=1, padding=0)
        self.convs = nn.ModuleList(
            ConvBlock(mid_channels, out_channels, kernel_size=3, stride=1, padding=1)
            for _ in range(depth)
        )
        self.pool2d = nn.MaxPool2d(kernel_size=2)

    def forward(self, x):
        feature = self.reduce_conv(x)
        pyramid = []
        for level, conv in enumerate(self.convs):
            # Every level after the first is pooled down by a factor of 2.
            if level > 0:
                feature = self.pool2d(feature)
            pyramid.append(conv(feature))
        return pyramid
| 6,923 | 36.225806 | 117 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/deeplab_v3.py | from contextlib import ExitStack
import torch
from torch import nn
import torch.nn.functional as F
from iharm.model.modeling.basic_blocks import select_activation_function
from .basic_blocks import SeparableConv2d
from .resnet import ResNetBackbone
class DeepLabV3Plus(nn.Module):
    """
    DeepLabV3+ head over a ResNet backbone: ASPP on the deepest features,
    upsampled and concatenated with a projected low-level skip connection.
    """
    def __init__(self, backbone='resnet50', norm_layer=nn.BatchNorm2d,
                 backbone_norm_layer=None,
                 ch=256,
                 project_dropout=0.5,
                 inference_mode=False,
                 **kwargs):
        super(DeepLabV3Plus, self).__init__()
        if backbone_norm_layer is None:
            backbone_norm_layer = norm_layer
        self.backbone_name = backbone
        self.norm_layer = norm_layer
        self.backbone_norm_layer = backbone_norm_layer
        # Starts False; set_prediction_mode() below flips it if requested.
        self.inference_mode = False
        self.ch = ch
        self.aspp_in_channels = 2048
        self.skip_project_in_channels = 256  # layer 1 out_channels
        self._kwargs = kwargs
        if backbone == 'resnet34':
            # ResNet-34 uses basic blocks, so its stage outputs are narrower.
            self.aspp_in_channels = 512
            self.skip_project_in_channels = 64
        self.backbone = ResNetBackbone(backbone=self.backbone_name, pretrained_base=False,
                                       norm_layer=self.backbone_norm_layer, **kwargs)
        # Head input = ASPP output (ch) concatenated with the 32-channel skip projection.
        self.head = _DeepLabHead(in_channels=ch + 32, mid_channels=ch, out_channels=ch,
                                 norm_layer=self.norm_layer)
        self.skip_project = _SkipProject(self.skip_project_in_channels, 32, norm_layer=self.norm_layer)
        self.aspp = _ASPP(in_channels=self.aspp_in_channels,
                          atrous_rates=[12, 24, 36],
                          out_channels=ch,
                          project_dropout=project_dropout,
                          norm_layer=self.norm_layer)
        if inference_mode:
            self.set_prediction_mode()
    def load_pretrained_weights(self):
        # Build a pretrained copy of the backbone and merge its weights in.
        pretrained = ResNetBackbone(backbone=self.backbone_name, pretrained_base=True,
                                    norm_layer=self.backbone_norm_layer, **self._kwargs)
        backbone_state_dict = self.backbone.state_dict()
        pretrained_state_dict = pretrained.state_dict()
        backbone_state_dict.update(pretrained_state_dict)
        self.backbone.load_state_dict(backbone_state_dict)
        if self.inference_mode:
            # Frozen backbone: no gradients needed during inference.
            for param in self.backbone.parameters():
                param.requires_grad = False
    def set_prediction_mode(self):
        # Switch to eval mode; forward() will also run under no_grad.
        self.inference_mode = True
        self.eval()
    def forward(self, x, mask_features=None):
        with ExitStack() as stack:
            if self.inference_mode:
                # Skip autograd bookkeeping entirely when frozen.
                stack.enter_context(torch.no_grad())
            c1, _, c3, c4 = self.backbone(x, mask_features)
            c1 = self.skip_project(c1)
            x = self.aspp(c4)
            # Upsample ASPP output to the skip resolution before fusing.
            x = F.interpolate(x, c1.size()[2:], mode='bilinear', align_corners=True)
            x = torch.cat((x, c1), dim=1)
            x = self.head(x)
        # Returned as a 1-tuple for interface uniformity with other backbones.
        return x,
class _SkipProject(nn.Module):
    """1x1 projection of the low-level skip features before fusion with ASPP output."""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d):
        super(_SkipProject, self).__init__()
        activation_cls = select_activation_function("relu")
        self.skip_project = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            norm_layer(out_channels),
            activation_cls()
        )

    def forward(self, x):
        return self.skip_project(x)
class _DeepLabHead(nn.Module):
    """Two separable convs followed by a 1x1 conv producing the final features."""

    def __init__(self, out_channels, in_channels, mid_channels=256, norm_layer=nn.BatchNorm2d):
        super(_DeepLabHead, self).__init__()
        sep_conv_params = dict(dw_kernel=3, dw_padding=1,
                               activation='relu', norm_layer=norm_layer)
        self.block = nn.Sequential(
            SeparableConv2d(in_channels=in_channels, out_channels=mid_channels, **sep_conv_params),
            SeparableConv2d(in_channels=mid_channels, out_channels=mid_channels, **sep_conv_params),
            nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1)
        )

    def forward(self, x):
        return self.block(x)
class _ASPP(nn.Module):
    """Atrous spatial pyramid pooling: 1x1, three dilated 3x3 branches and global pooling."""

    def __init__(self, in_channels, atrous_rates, out_channels=256,
                 project_dropout=0.5, norm_layer=nn.BatchNorm2d):
        super(_ASPP, self).__init__()

        conv1x1_branch = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=False),
            norm_layer(out_channels),
            nn.ReLU()
        )
        rate1, rate2, rate3 = tuple(atrous_rates)
        self.concurent = nn.ModuleList([
            conv1x1_branch,
            _ASPPConv(in_channels, out_channels, rate1, norm_layer),
            _ASPPConv(in_channels, out_channels, rate2, norm_layer),
            _ASPPConv(in_channels, out_channels, rate3, norm_layer),
            _AsppPooling(in_channels, out_channels, norm_layer=norm_layer),
        ])

        # Project the 5-branch concatenation back down to out_channels.
        project_layers = [
            nn.Conv2d(in_channels=5*out_channels, out_channels=out_channels,
                      kernel_size=1, bias=False),
            norm_layer(out_channels),
            nn.ReLU()
        ]
        if project_dropout > 0:
            project_layers.append(nn.Dropout(project_dropout))
        self.project = nn.Sequential(*project_layers)

    def forward(self, x):
        branch_outputs = [branch(x) for branch in self.concurent]
        return self.project(torch.cat(branch_outputs, dim=1))
class _AsppPooling(nn.Module):
    """Global-average-pooling branch of ASPP, upsampled back to the input size."""

    def __init__(self, in_channels, out_channels, norm_layer):
        super(_AsppPooling, self).__init__()
        self.gap = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=1, bias=False),
            norm_layer(out_channels),
            nn.ReLU()
        )

    def forward(self, x):
        pooled = self.gap(x)
        # Broadcast the 1x1 global descriptor back to the input resolution.
        return F.interpolate(pooled, x.size()[2:], mode='bilinear', align_corners=True)
def _ASPPConv(in_channels, out_channels, atrous_rate, norm_layer):
    """Build one dilated-conv branch of ASPP: 3x3 atrous conv -> norm -> ReLU."""
    # padding == dilation keeps the spatial size unchanged for a 3x3 kernel.
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                  kernel_size=3, padding=atrous_rate,
                  dilation=atrous_rate, bias=False),
        norm_layer(out_channels),
        nn.ReLU()
    )
| 6,392 | 35.118644 | 103 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/resnet.py | import torch
from .resnetv1b import resnet34_v1b, resnet50_v1s, resnet101_v1s, resnet152_v1s
class ResNetBackbone(torch.nn.Module):
    """Wrapper exposing the stem and four stage outputs of a ResNet-v1b body."""

    def __init__(self, backbone='resnet50', pretrained_base=True, dilated=True, **kwargs):
        super(ResNetBackbone, self).__init__()

        backbone_builders = {
            'resnet34': resnet34_v1b,
            'resnet50': resnet50_v1s,
            'resnet101': resnet101_v1s,
            'resnet152': resnet152_v1s,
        }
        if backbone not in backbone_builders:
            raise RuntimeError(f'unknown backbone: {backbone}')
        pretrained = backbone_builders[backbone](
            pretrained=pretrained_base, dilated=dilated, **kwargs)

        # Re-register the pieces we use so only they belong to this module
        # (the classifier head of the pretrained net is dropped).
        self.conv1 = pretrained.conv1
        self.bn1 = pretrained.bn1
        self.relu = pretrained.relu
        self.maxpool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.layer4 = pretrained.layer4

    def forward(self, x, mask_features=None):
        x = self.relu(self.bn1(self.conv1(x)))
        # Optionally inject externally computed mask features after the stem.
        if mask_features is not None:
            x = x + mask_features
        x = self.maxpool(x)

        c1 = self.layer1(x)
        c2 = self.layer2(c1)
        c3 = self.layer3(c2)
        c4 = self.layer4(c3)
        return c1, c2, c3, c4
| 1,552 | 35.97619 | 93 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/hrnet_ocr.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._utils
from .ocr import SpatialOCR_Module, SpatialGather_Module
from .resnetv1b import BasicBlockV1b, BottleneckV1b
from iharm.utils.log import logger
relu_inplace = True
class HighResolutionModule(nn.Module):
    """
    One HRNet stage: `num_branches` parallel residual branches at different
    resolutions, followed by fuse layers that exchange information between
    every pair of branches (bilinear upsampling or strided-conv downsampling
    as needed).
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method,multi_scale_output=True,
                 norm_layer=nn.BatchNorm2d, align_corners=True):
        super(HighResolutionModule, self).__init__()
        # Fail fast if the per-branch configuration lists disagree in length.
        self._check_branches(num_branches, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.norm_layer = norm_layer
        self.align_corners = align_corners
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=relu_inplace)
    def _check_branches(self, num_branches, num_blocks, num_inchannels, num_channels):
        # Raise ValueError when any config list length differs from num_branches.
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        # Build the residual-block stack for a single branch.
        downsample = None
        # A projection shortcut is needed when stride or channel count changes.
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(num_channels[branch_index] * block.expansion),
            )
        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride,
                            downsample=downsample, norm_layer=self.norm_layer))
        # Record the branch's new channel count; used by _make_fuse_layers.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index],
                                norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        # Build all parallel branches.
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        # fuse_layers[i][j] transforms branch j's output to branch i's
        # channels/resolution; None when i == j (identity). Spatial
        # upsampling for j > i is done with F.interpolate in forward().
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower-resolution source: 1x1 conv to match channels.
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(in_channels=num_inchannels[j],
                                  out_channels=num_inchannels[i],
                                  kernel_size=1,
                                  bias=False),
                        self.norm_layer(num_inchannels[i])))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    # Higher-resolution source: chain of stride-2 3x3 convs;
                    # only the last one changes the channel count.
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          kernel_size=3, stride=2, padding=1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          kernel_size=3, stride=2, padding=1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3),
                                nn.ReLU(inplace=relu_inplace)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        # Channel counts per branch after this module (used to chain stages).
        return self.num_inchannels
    def forward(self, x):
        # x: list of per-branch tensors; returns list of fused per-branch tensors.
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    # Upsample the channel-matched lower-resolution features
                    # to branch i's spatial size before summing.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode='bilinear', align_corners=self.align_corners)
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
class HighResolutionNet(nn.Module):
def __init__(self, width, num_classes, ocr_width=256, small=False,
norm_layer=nn.BatchNorm2d, align_corners=True):
super(HighResolutionNet, self).__init__()
self.norm_layer = norm_layer
self.width = width
self.ocr_width = ocr_width
self.ocr_on = ocr_width > 0
self.align_corners = align_corners
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = norm_layer(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = norm_layer(64)
self.relu = nn.ReLU(inplace=relu_inplace)
num_blocks = 2 if small else 4
stage1_num_channels = 64
self.layer1 = self._make_layer(BottleneckV1b, 64, stage1_num_channels, blocks=num_blocks)
stage1_out_channel = BottleneckV1b.expansion * stage1_num_channels
self.stage2_num_branches = 2
num_channels = [width, 2 * width]
num_inchannels = [
num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer(
[stage1_out_channel], num_inchannels)
self.stage2, pre_stage_channels = self._make_stage(
BasicBlockV1b, num_inchannels=num_inchannels, num_modules=1, num_branches=self.stage2_num_branches,
num_blocks=2 * [num_blocks], num_channels=num_channels)
self.stage3_num_branches = 3
num_channels = [width, 2 * width, 4 * width]
num_inchannels = [
num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_inchannels)
self.stage3, pre_stage_channels = self._make_stage(
BasicBlockV1b, num_inchannels=num_inchannels,
num_modules=3 if small else 4, num_branches=self.stage3_num_branches,
num_blocks=3 * [num_blocks], num_channels=num_channels)
self.stage4_num_branches = 4
num_channels = [width, 2 * width, 4 * width, 8 * width]
num_inchannels = [
num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_inchannels)
self.stage4, pre_stage_channels = self._make_stage(
BasicBlockV1b, num_inchannels=num_inchannels, num_modules=2 if small else 3,
num_branches=self.stage4_num_branches,
num_blocks=4 * [num_blocks], num_channels=num_channels)
if self.ocr_on:
last_inp_channels = np.int(np.sum(pre_stage_channels))
ocr_mid_channels = 2 * ocr_width
ocr_key_channels = ocr_width
self.conv3x3_ocr = nn.Sequential(
nn.Conv2d(last_inp_channels, ocr_mid_channels,
kernel_size=3, stride=1, padding=1),
norm_layer(ocr_mid_channels),
nn.ReLU(inplace=relu_inplace),
)
self.ocr_gather_head = SpatialGather_Module(num_classes)
self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels,
key_channels=ocr_key_channels,
out_channels=ocr_mid_channels,
scale=1,
dropout=0.05,
norm_layer=norm_layer,
align_corners=align_corners)
def _make_transition_layer(
self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i],
num_channels_cur_layer[i],
kernel_size=3,
stride=1,
padding=1,
bias=False),
self.norm_layer(num_channels_cur_layer[i]),
nn.ReLU(inplace=relu_inplace)))
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i + 1 - num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] \
if j == i - num_branches_pre else inchannels
conv3x3s.append(nn.Sequential(
nn.Conv2d(inchannels, outchannels,
kernel_size=3, stride=2, padding=1, bias=False),
self.norm_layer(outchannels),
nn.ReLU(inplace=relu_inplace)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(inplanes, planes, stride,
downsample=downsample, norm_layer=self.norm_layer))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes, norm_layer=self.norm_layer))
return nn.Sequential(*layers)
def _make_stage(self, block, num_inchannels,
num_modules, num_branches, num_blocks, num_channels,
fuse_method='SUM',
multi_scale_output=True):
modules = []
for i in range(num_modules):
# multi_scale_output is only used last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output,
norm_layer=self.norm_layer,
align_corners=self.align_corners)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x, mask=None, additional_features=None):
hrnet_feats = self.compute_hrnet_feats(x, additional_features)
if not self.ocr_on:
return hrnet_feats,
ocr_feats = self.conv3x3_ocr(hrnet_feats)
mask = nn.functional.interpolate(mask, size=ocr_feats.size()[2:], mode='bilinear', align_corners=True)
context = self.ocr_gather_head(ocr_feats, mask)
ocr_feats = self.ocr_distri_head(ocr_feats, context)
return ocr_feats,
def compute_hrnet_feats(self, x, additional_features, return_list=False):
x = self.compute_pre_stage_features(x, additional_features)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_num_branches):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_num_branches):
if self.transition2[i] is not None:
if i < self.stage2_num_branches:
x_list.append(self.transition2[i](y_list[i]))
else:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_num_branches):
if self.transition3[i] is not None:
if i < self.stage3_num_branches:
x_list.append(self.transition3[i](y_list[i]))
else:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
x = self.stage4(x_list)
if return_list:
return x
# Upsampling
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(x[1], size=(x0_h, x0_w),
mode='bilinear', align_corners=self.align_corners)
x2 = F.interpolate(x[2], size=(x0_h, x0_w),
mode='bilinear', align_corners=self.align_corners)
x3 = F.interpolate(x[3], size=(x0_h, x0_w),
mode='bilinear', align_corners=self.align_corners)
return torch.cat([x[0], x1, x2, x3], 1)
def compute_pre_stage_features(self, x, additional_features):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if additional_features is not None:
x = x + additional_features
x = self.conv2(x)
x = self.bn2(x)
return self.relu(x)
    def load_pretrained_weights(self, pretrained_path=''):
        """Partially load ImageNet-pretrained HRNet weights.

        Keys absent from this model are silently dropped; the process exits
        with an explanatory message when the checkpoint file is missing.
        """
        model_dict = self.state_dict()
        if not os.path.exists(pretrained_path):
            print(f'\nFile "{pretrained_path}" does not exist.')
            print('You need to specify the correct path to the pre-trained weights.\n'
                  'You can download the weights for HRNet from the repository:\n'
                  'https://github.com/HRNet/HRNet-Image-Classification')
            exit(1)
        pretrained_dict = torch.load(pretrained_path, map_location={'cuda:0': 'cpu'})
        # Align checkpoint key names with this module's attribute names.
        pretrained_dict = {k.replace('last_layer', 'aux_head').replace('model.', ''): v for k, v in
                           pretrained_dict.items()}
        params_count = len(pretrained_dict)
        # Keep only keys present in this model (partial loading is expected).
        pretrained_dict = {k: v for k, v in pretrained_dict.items()
                           if k in model_dict.keys()}
        logger.info(f'Loaded {len(pretrained_dict)} of {params_count} pretrained parameters for HRNet')
        model_dict.update(pretrained_dict)
        self.load_state_dict(model_dict)
| 17,393 | 42.376559 | 111 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/ocr.py | import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
class SpatialGather_Module(nn.Module):
    """
    Soft region pooling: aggregate one context vector per class.

    Given pixel features and per-class probability maps, each class map is
    softmax-normalized over the spatial axis and used as attention weights to
    average the pixel features, producing a (B, C, K, 1) context tensor.
    """
    def __init__(self, cls_num=0, scale=1):
        super(SpatialGather_Module, self).__init__()
        self.cls_num = cls_num
        self.scale = scale

    def forward(self, feats, probs):
        # Flatten spatial dims: probs -> (B, K, H*W), feats -> (B, H*W, C).
        n_batch, n_cls = probs.size(0), probs.size(1)
        flat_probs = probs.view(n_batch, n_cls, -1)
        flat_feats = feats.view(n_batch, feats.size(1), -1).permute(0, 2, 1)
        # Each class row becomes a soft attention distribution over pixels.
        attention = F.softmax(self.scale * flat_probs, dim=2)
        # (B, K, H*W) x (B, H*W, C) -> (B, K, C), reshaped to (B, C, K, 1).
        context = torch.matmul(attention, flat_feats)
        return context.permute(0, 2, 1).unsqueeze(3).contiguous()
class SpatialOCR_Module(nn.Module):
    """
    OCR feature-update module.

    Enriches every pixel representation with aggregated object-region context
    (via cross-attention), then fuses the two streams with a 1x1 conv,
    normalization, and dropout.
    """
    def __init__(self,
                 in_channels,
                 key_channels,
                 out_channels,
                 scale=1,
                 dropout=0.1,
                 norm_layer=nn.BatchNorm2d,
                 align_corners=True):
        super(SpatialOCR_Module, self).__init__()
        # Cross-attention between pixel features and object-region descriptors.
        self.object_context_block = ObjectAttentionBlock2D(
            in_channels, key_channels, scale, norm_layer, align_corners)
        # Fusion sees [context, feats] concatenated channel-wise.
        fused_channels = 2 * in_channels
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(fused_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.Sequential(norm_layer(out_channels), nn.ReLU(inplace=True)),
            nn.Dropout2d(dropout)
        )

    def forward(self, feats, proxy_feats):
        pixel_context = self.object_context_block(feats, proxy_feats)
        return self.conv_bn_dropout(torch.cat([pixel_context, feats], 1))
class ObjectAttentionBlock2D(nn.Module):
    '''
    The basic implementation for object context block
    Input:
        N X C X H X W
    Parameters:
        in_channels : the dimension of the input feature map
        key_channels : the dimension after the key/query transform
        scale : choose the scale to downsample the input feature maps (save memory cost)
        bn_type : specify the bn type
    Return:
        N X C X H X W
    '''
    def __init__(self,
                 in_channels,
                 key_channels,
                 scale=1,
                 norm_layer=nn.BatchNorm2d,
                 align_corners=True):
        super(ObjectAttentionBlock2D, self).__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.key_channels = key_channels
        self.align_corners = align_corners
        # Optional spatial downsampling of the pixel features (memory saving).
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))
        # Query transform for pixel features: two 1x1 conv + norm + ReLU.
        self.f_pixel = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            nn.Sequential(norm_layer(self.key_channels), nn.ReLU(inplace=True)),
            nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            nn.Sequential(norm_layer(self.key_channels), nn.ReLU(inplace=True))
        )
        # Key transform for the object (proxy) features.
        self.f_object = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            nn.Sequential(norm_layer(self.key_channels), nn.ReLU(inplace=True)),
            nn.Conv2d(in_channels=self.key_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            nn.Sequential(norm_layer(self.key_channels), nn.ReLU(inplace=True))
        )
        # Value transform for the object features.
        self.f_down = nn.Sequential(
            nn.Conv2d(in_channels=self.in_channels, out_channels=self.key_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            nn.Sequential(norm_layer(self.key_channels), nn.ReLU(inplace=True))
        )
        # Projects the attended context back to in_channels.
        self.f_up = nn.Sequential(
            nn.Conv2d(in_channels=self.key_channels, out_channels=self.in_channels,
                      kernel_size=1, stride=1, padding=0, bias=False),
            nn.Sequential(norm_layer(self.in_channels), nn.ReLU(inplace=True))
        )
    def forward(self, x, proxy):
        batch_size, h, w = x.size(0), x.size(2), x.size(3)
        if self.scale > 1:
            x = self.pool(x)
        # query: (N, HW, key_channels); key: (N, key_channels, K);
        # value: (N, K, key_channels) — K is the number of object regions.
        query = self.f_pixel(x).view(batch_size, self.key_channels, -1)
        query = query.permute(0, 2, 1)
        key = self.f_object(proxy).view(batch_size, self.key_channels, -1)
        value = self.f_down(proxy).view(batch_size, self.key_channels, -1)
        value = value.permute(0, 2, 1)
        # Scaled dot-product attention of pixels over object regions.
        sim_map = torch.matmul(query, key)
        sim_map = (self.key_channels ** -.5) * sim_map
        sim_map = F.softmax(sim_map, dim=-1)
        # add bg context ...
        context = torch.matmul(sim_map, value)
        context = context.permute(0, 2, 1).contiguous()
        context = context.view(batch_size, self.key_channels, *x.size()[2:])
        context = self.f_up(context)
        if self.scale > 1:
            # Restore the original resolution removed by the pooling above.
            context = F.interpolate(input=context, size=(h, w),
                                    mode='bilinear', align_corners=self.align_corners)
        return context
| 5,740 | 39.429577 | 100 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/conv_autoencoder.py | import torch
from torch import nn as nn
from iharm.model.modeling.basic_blocks import ConvBlock
from iharm.model.ops import MaskedChannelAttention, FeaturesConnector
class ConvEncoder(nn.Module):
    """Plain convolutional encoder with optional backbone feature injection.

    Builds `depth` ConvBlocks; channel width doubles every second block up to
    `max_channels`. From block index `backbone_from` onwards, backbone feature
    maps are fused into the stream via `FeaturesConnector`s. BatchNorm is
    enabled starting at block index `batchnorm_from`.
    """
    def __init__(
        self,
        depth, ch,
        norm_layer, batchnorm_from, max_channels,
        backbone_from, backbone_channels=None, backbone_mode=''
    ):
        super(ConvEncoder, self).__init__()
        self.depth = depth
        self.backbone_from = backbone_from
        # Reversed so .pop() yields channel counts in shallow-to-deep order.
        backbone_channels = [] if backbone_channels is None else backbone_channels[::-1]
        in_channels = 4  # presumably 3-channel image + 1-channel mask — confirm with caller
        out_channels = ch
        self.block0 = ConvBlock(in_channels, out_channels, norm_layer=norm_layer if batchnorm_from == 0 else None)
        self.block1 = ConvBlock(out_channels, out_channels, norm_layer=norm_layer if 0 <= batchnorm_from <= 1 else None)
        self.blocks_channels = [out_channels, out_channels]
        self.blocks_connected = nn.ModuleDict()
        self.connectors = nn.ModuleDict()
        for block_i in range(2, depth):
            if block_i % 2:
                in_channels = out_channels
            else:
                # Double the width on every even block, capped at max_channels.
                in_channels, out_channels = out_channels, min(2 * out_channels, max_channels)
            if 0 <= backbone_from <= block_i and len(backbone_channels):
                stage_channels = backbone_channels.pop()
                connector = FeaturesConnector(backbone_mode, in_channels, stage_channels, in_channels)
                self.connectors[f'connector{block_i}'] = connector
                in_channels = connector.output_channels
            self.blocks_connected[f'block{block_i}'] = ConvBlock(
                in_channels, out_channels,
                norm_layer=norm_layer if 0 <= batchnorm_from <= block_i else None,
                padding=int(block_i < depth - 1)
            )
            self.blocks_channels += [out_channels]
    def forward(self, x, backbone_features):
        """Return the outputs of every block, deepest first."""
        backbone_features = [] if backbone_features is None else backbone_features[::-1]
        outputs = [self.block0(x)]
        outputs += [self.block1(outputs[-1])]
        for block_i in range(2, self.depth):
            block = self.blocks_connected[f'block{block_i}']
            output = outputs[-1]
            connector_name = f'connector{block_i}'
            if connector_name in self.connectors:
                # Fuse the matching backbone stage into the current features.
                stage_features = backbone_features.pop()
                connector = self.connectors[connector_name]
                output = connector(output, stage_features)
            outputs += [block(output)]
        return outputs[::-1]
class DeconvDecoder(nn.Module):
    """Decoder of transposed-conv blocks with additive skip connections.

    Optionally finishes with an attention-weighted blend of the input image
    and the decoded RGB prediction ("image fusion").
    """
    def __init__(self, depth, encoder_blocks_channels, norm_layer, attend_from=-1, image_fusion=False):
        super(DeconvDecoder, self).__init__()
        self.image_fusion = image_fusion
        self.deconv_blocks = nn.ModuleList()
        in_channels = encoder_blocks_channels.pop()
        out_channels = in_channels
        for d in range(depth):
            # Mirror the encoder's channel counts; halve once they run out.
            out_channels = encoder_blocks_channels.pop() if len(encoder_blocks_channels) else in_channels // 2
            self.deconv_blocks.append(SEDeconvBlock(
                in_channels, out_channels,
                norm_layer=norm_layer,
                padding=0 if d == 0 else 1,
                with_se=0 <= attend_from <= d
            ))
            in_channels = out_channels
        if self.image_fusion:
            self.conv_attention = nn.Conv2d(out_channels, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(out_channels, 3, kernel_size=1)
    def forward(self, encoder_outputs, image, mask=None):
        # encoder_outputs are ordered deepest first; skips are added, not
        # concatenated. The last block has no skip connection.
        output = encoder_outputs[0]
        for block, skip_output in zip(self.deconv_blocks[:-1], encoder_outputs[1:]):
            output = block(output, mask)
            output = output + skip_output
        output = self.deconv_blocks[-1](output, mask)
        if self.image_fusion:
            # sigmoid(3x) sharpens the blend between input and prediction.
            attention_map = torch.sigmoid(3.0 * self.conv_attention(output))
            output = attention_map * image + (1.0 - attention_map) * self.to_rgb(output)
        else:
            output = self.to_rgb(output)
        return output
class SEDeconvBlock(nn.Module):
    """
    Upsampling block: transposed conv -> norm -> activation, with an optional
    mask-guided channel attention (squeeze-and-excite) applied on top.
    """
    def __init__(
        self,
        in_channels, out_channels,
        kernel_size=4, stride=2, padding=1,
        norm_layer=nn.BatchNorm2d, activation=nn.ELU,
        with_se=False
    ):
        super(SEDeconvBlock, self).__init__()
        self.with_se = with_se
        self.block = nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding),
            norm_layer(out_channels) if norm_layer is not None else nn.Identity(),
            activation(),
        )
        if self.with_se:
            self.se = MaskedChannelAttention(out_channels)

    def forward(self, x, mask=None):
        features = self.block(x)
        if not self.with_se:
            return features
        # Channel re-weighting driven by the composite mask.
        return self.se(features, mask)
| 4,940 | 37.601563 | 120 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/unet.py | import torch
from torch import nn as nn
from functools import partial
from iharm.model.modeling.basic_blocks import ConvBlock
from iharm.model.ops import FeaturesConnector
class UNetEncoder(nn.Module):
    """U-Net encoder with optional backbone feature injection.

    Consists of block0/block1 plus (depth - 2) further down-blocks; channel
    width doubles per stage up to `max_channels`, and the last stage does not
    pool. From `backbone_from` onwards, backbone feature maps are fused in
    through `FeaturesConnector`s.
    """
    def __init__(
        self,
        depth, ch,
        norm_layer, batchnorm_from, max_channels,
        backbone_from, backbone_channels=None, backbone_mode=''
    ):
        super(UNetEncoder, self).__init__()
        self.depth = depth
        self.backbone_from = backbone_from
        self.block_channels = []
        # Reversed so .pop() yields channel counts in shallow-to-deep order.
        backbone_channels = [] if backbone_channels is None else backbone_channels[::-1]
        relu = partial(nn.ReLU, inplace=True)
        in_channels = 4  # presumably 3-channel image + 1-channel mask — confirm with caller
        out_channels = ch
        self.block0 = UNetDownBlock(
            in_channels, out_channels,
            norm_layer=norm_layer if batchnorm_from == 0 else None,
            activation=relu,
            pool=True, padding=1,
        )
        self.block_channels.append(out_channels)
        in_channels, out_channels = out_channels, min(2 * out_channels, max_channels)
        self.block1 = UNetDownBlock(
            in_channels, out_channels,
            norm_layer=norm_layer if 0 <= batchnorm_from <= 1 else None,
            activation=relu,
            pool=True, padding=1,
        )
        self.block_channels.append(out_channels)
        self.blocks_connected = nn.ModuleDict()
        self.connectors = nn.ModuleDict()
        for block_i in range(2, depth):
            in_channels, out_channels = out_channels, min(2 * out_channels, max_channels)
            if 0 <= backbone_from <= block_i and len(backbone_channels):
                stage_channels = backbone_channels.pop()
                connector = FeaturesConnector(backbone_mode, in_channels, stage_channels, in_channels)
                self.connectors[f'connector{block_i}'] = connector
                in_channels = connector.output_channels
            self.blocks_connected[f'block{block_i}'] = UNetDownBlock(
                in_channels, out_channels,
                norm_layer=norm_layer if 0 <= batchnorm_from <= block_i else None,
                activation=relu, padding=1,
                pool=block_i < depth - 1,
            )
            self.block_channels.append(out_channels)
    def forward(self, x, backbone_features):
        """Return the pre-pool output of every stage, deepest first."""
        backbone_features = [] if backbone_features is None else backbone_features[::-1]
        outputs = []
        block_input = x
        # Each down-block returns (skip_features, pooled_features).
        output, block_input = self.block0(block_input)
        outputs.append(output)
        output, block_input = self.block1(block_input)
        outputs.append(output)
        for block_i in range(2, self.depth):
            block = self.blocks_connected[f'block{block_i}']
            connector_name = f'connector{block_i}'
            if connector_name in self.connectors:
                stage_features = backbone_features.pop()
                connector = self.connectors[connector_name]
                block_input = connector(block_input, stage_features)
            output, block_input = block(block_input)
            outputs.append(output)
        return outputs[::-1]
class UNetDecoder(nn.Module):
    """U-Net decoder of up-blocks with concatenated skips and optional
    attention-weighted blending of the input image ("image fusion")."""
    def __init__(self, depth, encoder_blocks_channels, norm_layer,
                 attention_layer=None, attend_from=3, image_fusion=False):
        super(UNetDecoder, self).__init__()
        self.up_blocks = nn.ModuleList()
        self.image_fusion = image_fusion
        in_channels = encoder_blocks_channels.pop()
        out_channels = in_channels
        # Last encoder layer doesn't pool, so there're only (depth - 1) deconvs
        for d in range(depth - 1):
            out_channels = encoder_blocks_channels.pop() if len(encoder_blocks_channels) else in_channels // 2
            # Attention (if provided) is enabled from stage `attend_from` on.
            stage_attention_layer = attention_layer if 0 <= attend_from <= d else None
            self.up_blocks.append(UNetUpBlock(
                in_channels, out_channels, out_channels,
                norm_layer=norm_layer, activation=partial(nn.ReLU, inplace=True),
                padding=1,
                attention_layer=stage_attention_layer,
            ))
            in_channels = out_channels
        if self.image_fusion:
            self.conv_attention = nn.Conv2d(out_channels, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(out_channels, 3, kernel_size=1)
    def forward(self, encoder_outputs, input_image, mask):
        # encoder_outputs are deepest-first; each up-block consumes one skip.
        output = encoder_outputs[0]
        for block, skip_output in zip(self.up_blocks, encoder_outputs[1:]):
            output = block(output, skip_output, mask)
        if self.image_fusion:
            # sigmoid(3x) sharpens the blend between input and prediction.
            attention_map = torch.sigmoid(3.0 * self.conv_attention(output))
            output = attention_map * input_image + (1.0 - attention_map) * self.to_rgb(output)
        else:
            output = self.to_rgb(output)
        return output
class UNetDownBlock(nn.Module):
    """One U-Net encoder stage: double conv, then optional 2x2 max-pool.

    `forward` returns both the pre-pool features (used as the skip
    connection) and the pooled features (input to the next stage).
    """
    def __init__(self, in_channels, out_channels, norm_layer, activation, pool, padding):
        super(UNetDownBlock, self).__init__()
        self.convs = UNetDoubleConv(
            in_channels, out_channels,
            norm_layer=norm_layer, activation=activation, padding=padding,
        )
        # Identity keeps the interface uniform when this stage doesn't pool.
        self.pooling = nn.MaxPool2d(2, 2) if pool else nn.Identity()

    def forward(self, x):
        skip = self.convs(x)
        return skip, self.pooling(skip)
class UNetUpBlock(nn.Module):
    """U-Net decoder stage: 2x upsample + conv, concat encoder skip, then a
    double conv; optionally runs a mask-aware attention layer on the
    concatenated features first."""
    def __init__(
        self,
        in_channels_decoder, in_channels_encoder, out_channels,
        norm_layer, activation, padding,
        attention_layer,
    ):
        super(UNetUpBlock, self).__init__()
        self.upconv = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            ConvBlock(
                in_channels_decoder, out_channels,
                kernel_size=3, stride=1, padding=1,
                norm_layer=None, activation=activation,
            )
        )
        self.convs = UNetDoubleConv(
            in_channels_encoder + out_channels, out_channels,
            norm_layer=norm_layer, activation=activation, padding=padding,
        )
        if attention_layer is not None:
            self.attention = attention_layer(in_channels_encoder + out_channels, norm_layer, activation)
        else:
            self.attention = None
    def forward(self, x, encoder_out, mask=None):
        upsample_x = self.upconv(x)
        # Channel-concat the encoder skip with the upsampled decoder features.
        x_cat_encoder = torch.cat([encoder_out, upsample_x], dim=1)
        if self.attention is not None:
            x_cat_encoder = self.attention(x_cat_encoder, mask)
        return self.convs(x_cat_encoder)
class UNetDoubleConv(nn.Module):
    """Two consecutive 3x3 ConvBlocks sharing norm/activation settings."""
    def __init__(self, in_channels, out_channels, norm_layer, activation, padding):
        super(UNetDoubleConv, self).__init__()
        conv_params = dict(kernel_size=3, stride=1, padding=padding,
                           norm_layer=norm_layer, activation=activation)
        self.block = nn.Sequential(
            ConvBlock(in_channels, out_channels, **conv_params),
            ConvBlock(out_channels, out_channels, **conv_params),
        )

    def forward(self, x):
        return self.block(x)
| 7,240 | 38.140541 | 110 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/model/modeling/resnetv1b.py | import torch
import torch.nn as nn
GLUON_RESNET_TORCH_HUB = 'rwightman/pytorch-pretrained-gluonresnet'
class BasicBlockV1b(nn.Module):
    """ResNet v1b basic residual block: two 3x3 convs plus a shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 previous_dilation=1, norm_layer=nn.BatchNorm2d):
        super(BasicBlockV1b, self).__init__()
        # First conv carries any stride/dilation; second keeps resolution.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=previous_dilation, dilation=previous_dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is the identity unless a projection is required.
        shortcut = x if self.downsample is None else self.downsample(x)
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        return self.relu(branch + shortcut)
class BottleneckV1b(nn.Module):
    """ResNet v1b bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 previous_dilation=1, norm_layer=nn.BatchNorm2d):
        super(BottleneckV1b, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        # The 3x3 conv carries any stride/dilation for this block.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is the identity unless a projection is required.
        shortcut = x if self.downsample is None else self.downsample(x)
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        return self.relu(branch + shortcut)
class ResNetV1b(nn.Module):
    """ Pre-trained ResNetV1b Model, which produces the strides of 8 featuremaps at conv5.
    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    classes : int, default 1000
        Number of classification classes.
    dilated : bool, default False
        Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
        typically used in Semantic Segmentation.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm2d`)
    deep_stem : bool, default False
        Whether to replace the 7x7 conv1 with 3 3x3 convolution layers.
    avg_down : bool, default False
        Whether to use average pooling for projection skip connection between stages/downsample.
    final_drop : float, default 0.0
        Dropout ratio before the final classification layer.
    Reference:
        - He, Kaiming, et al. "Deep residual learning for image recognition."
        Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
        - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
    """
    def __init__(self, block, layers, classes=1000, dilated=True, deep_stem=False, stem_width=32,
                 avg_down=False, final_drop=0.0, norm_layer=nn.BatchNorm2d):
        self.inplanes = stem_width*2 if deep_stem else 64
        super(ResNetV1b, self).__init__()
        # Stem: either a single 7x7 conv or three 3x3 convs (deep_stem).
        if not deep_stem:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        else:
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(stem_width),
                nn.ReLU(True),
                nn.Conv2d(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False),
                norm_layer(stem_width),
                nn.ReLU(True),
                nn.Conv2d(stem_width, 2*stem_width, kernel_size=3, stride=1, padding=1, bias=False)
            )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(True)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], avg_down=avg_down,
                                       norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, avg_down=avg_down,
                                       norm_layer=norm_layer)
        # With dilation, stages 3/4 keep stride 1 and dilate instead,
        # yielding an overall output stride of 8 instead of 32.
        if dilated:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2,
                                           avg_down=avg_down, norm_layer=norm_layer)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4,
                                           avg_down=avg_down, norm_layer=norm_layer)
        else:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                           avg_down=avg_down, norm_layer=norm_layer)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                           avg_down=avg_down, norm_layer=norm_layer)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.drop = None
        if final_drop > 0.0:
            self.drop = nn.Dropout(final_drop)
        self.fc = nn.Linear(512 * block.expansion, classes)
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
                    avg_down=False, norm_layer=nn.BatchNorm2d):
        """Build one residual stage of `blocks` blocks, adding a projection
        shortcut (conv, optionally preceded by avg-pool) when needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = []
            if avg_down:
                # v1b trick: average-pool before the 1x1 projection so the
                # shortcut does not discard information via strided conv.
                if dilation == 1:
                    downsample.append(
                        nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)
                    )
                else:
                    downsample.append(
                        nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False)
                    )
                downsample.extend([
                    nn.Conv2d(self.inplanes, out_channels=planes * block.expansion,
                              kernel_size=1, stride=1, bias=False),
                    norm_layer(planes * block.expansion)
                ])
                downsample = nn.Sequential(*downsample)
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, out_channels=planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    norm_layer(planes * block.expansion)
                )
        layers = []
        # The first block of a dilation-4 stage uses dilation 2 (gradual rate
        # growth); the remaining blocks use the stage's full dilation.
        if dilation in (1, 2):
            layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample,
                                previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample,
                                previous_dilation=dilation, norm_layer=norm_layer))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation,
                                previous_dilation=dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Classification forward pass: stem, 4 stages, pooled logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        if self.drop is not None:
            x = self.drop(x)
        x = self.fc(x)
        return x
def _safe_state_dict_filtering(orig_dict, model_dict_keys):
filtered_orig_dict = {}
for k, v in orig_dict.items():
if k in model_dict_keys:
filtered_orig_dict[k] = v
else:
print(f"[ERROR] Failed to load <{k}> in backbone")
return filtered_orig_dict
def resnet34_v1b(pretrained=False, **kwargs):
    """Build a ResNet-34 v1b; optionally load GluonCV ImageNet weights."""
    model = ResNetV1b(BasicBlockV1b, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    state = model.state_dict()
    hub_state = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet34_v1b',
                               pretrained=True).state_dict()
    # Drop keys this model doesn't have (e.g. a differently sized classifier).
    state.update(_safe_state_dict_filtering(hub_state, state.keys()))
    model.load_state_dict(state)
    return model
def resnet50_v1s(pretrained=False, **kwargs):
    """Build a ResNet-50 v1s (deep stem); optionally load GluonCV weights."""
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, stem_width=64, **kwargs)
    if not pretrained:
        return model
    state = model.state_dict()
    hub_state = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet50_v1s',
                               pretrained=True).state_dict()
    # Drop keys this model doesn't have (e.g. a differently sized classifier).
    state.update(_safe_state_dict_filtering(hub_state, state.keys()))
    model.load_state_dict(state)
    return model
def resnet101_v1s(pretrained=False, **kwargs):
    """Build a ResNet-101 v1s (deep stem); optionally load GluonCV weights."""
    model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, stem_width=64, **kwargs)
    if not pretrained:
        return model
    state = model.state_dict()
    hub_state = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet101_v1s',
                               pretrained=True).state_dict()
    # Drop keys this model doesn't have (e.g. a differently sized classifier).
    state.update(_safe_state_dict_filtering(hub_state, state.keys()))
    model.load_state_dict(state)
    return model
def resnet152_v1s(pretrained=False, **kwargs):
    """Build a ResNet-152 v1s (deep stem); optionally load GluonCV weights."""
    model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], deep_stem=True, stem_width=64, **kwargs)
    if not pretrained:
        return model
    state = model.state_dict()
    hub_state = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet152_v1s',
                               pretrained=True).state_dict()
    # Drop keys this model doesn't have (e.g. a differently sized classifier).
    state.update(_safe_state_dict_filtering(hub_state, state.keys()))
    model.load_state_dict(state)
    return model
| 10,805 | 38.01083 | 112 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/inference/predictor.py | import torch
from iharm.inference.transforms import NormalizeTensor, PadToDivisor, ToTensor, AddFlippedTensor
class Predictor(object):
    """Runs a harmonization network on (image, mask) pairs at eval time.

    Wraps the network with the pre/post-processing pipeline: padding to the
    network's size divisor, tensor conversion, ImageNet normalization, and
    optional test-time flip averaging.
    """
    def __init__(self, net, device, with_flip=False,
                 mean=(.485, .456, .406), std=(.229, .224, .225)):
        self.device = device
        self.net = net.to(self.device)
        self.net.eval()
        # Encoder depth determines how many times spatial dims are halved, so
        # inputs must be padded to a multiple of 2 ** (depth + 1).
        if hasattr(net, 'depth'):
            size_divisor = 2 ** (net.depth + 1)
        else:
            size_divisor = 1
        mean = torch.tensor(mean, dtype=torch.float32)
        std = torch.tensor(std, dtype=torch.float32)
        self.transforms = [
            PadToDivisor(divisor=size_divisor, border_mode=0),
            ToTensor(self.device),
            NormalizeTensor(mean, std, self.device),
        ]
        if with_flip:
            self.transforms.append(AddFlippedTensor())

    def predict(self, image, mask, return_numpy=True):
        """Harmonize `image` within `mask`; returns an image in [0, 255].

        Bug fix: removed leftover `print(type(image)); exit()` debug lines
        that terminated the process before any prediction could run.
        """
        with torch.no_grad():
            # Forward transforms, network, then inverse transforms in
            # reverse order to undo padding/normalization.
            for transform in self.transforms:
                image, mask = transform.transform(image, mask)
            predicted_image = self.net(image, mask)['images']
            for transform in reversed(self.transforms):
                predicted_image = transform.inv_transform(predicted_image)
            predicted_image = torch.clamp(predicted_image, 0, 255)
        if return_numpy:
            return predicted_image.cpu().numpy()
        else:
            return predicted_image
| 1,474 | 31.777778 | 96 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/inference/evaluation.py | from time import time
from tqdm import trange
import torch
def evaluate_dataset(dataset, predictor, metrics_hub):
    """Run `predictor` over every sample in `dataset`, accumulating metrics.

    For each sample: predicts a harmonized image, records wall-clock
    prediction time in `metrics_hub`, and updates the metrics against the
    ground-truth target image restricted by the object mask.
    """
    for sample_i in trange(len(dataset), desc=f'Testing on {metrics_hub.name}'):
        sample = dataset.get_sample(sample_i)
        sample = dataset.augment_sample(sample)
        sample_mask = sample['object_mask']
        predict_start = time()
        pred = predictor.predict(sample['image'], sample_mask, return_numpy=False)
        # Fix: synchronize only when CUDA is available so CPU-only evaluation
        # doesn't crash; the sync is needed for accurate GPU timing.
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        metrics_hub.update_time(time() - predict_start)
        target_image = torch.as_tensor(sample['target_image'], dtype=torch.float32).to(predictor.device)
        sample_mask = torch.as_tensor(sample_mask, dtype=torch.float32).to(predictor.device)
        with torch.no_grad():
            metrics_hub.compute_and_add(pred, target_image, sample_mask)
| 841 | 39.095238 | 104 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/Huang et al./iharm/inference/transforms.py | import cv2
import torch
from collections import namedtuple
class EvalTransform:
    """Interface for invertible evaluation-time transforms.

    `transform` maps (image, mask) into model space; `inv_transform` maps a
    predicted image back. Subclasses must implement both.
    """
    def __init__(self):
        pass

    def transform(self, image, mask):
        raise NotImplementedError

    def inv_transform(self, image):
        raise NotImplementedError
class PadToDivisor(EvalTransform):
    """
    Pad side of the image so that its side is divisible by divisor.
    Args:
        divisor (int): desirable image size divisor
        border_mode (OpenCV flag): OpenCV border mode.
        fill_value (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.
    """
    # Per-side padding recorded by transform() and undone by inv_transform().
    PadParams = namedtuple('PadParams', ['top', 'bottom', 'left', 'right'])
    def __init__(self, divisor, border_mode=cv2.BORDER_CONSTANT, fill_value=0):
        super().__init__()
        self.border_mode = border_mode
        self.fill_value = fill_value
        self.divisor = divisor
        self._pads = None
    def transform(self, image, mask):
        # (top, bottom) comes from the height, (left, right) from the width.
        self._pads = PadToDivisor.PadParams(*self._get_dim_padding(image.shape[0]),
                                            *self._get_dim_padding(image.shape[1]))
        image = cv2.copyMakeBorder(image, *self._pads, self.border_mode, value=self.fill_value)
        mask = cv2.copyMakeBorder(mask, *self._pads, self.border_mode, value=self.fill_value)
        return image, mask
    def inv_transform(self, image):
        assert self._pads is not None,\
            'Something went wrong, inv_transform(...) should be called after transform(...)'
        return self._remove_padding(image)
    def _get_dim_padding(self, dim_size):
        # Total pad needed to reach the next multiple of divisor, split in two.
        pad = (self.divisor - dim_size % self.divisor) % self.divisor
        pad_upper = pad // 2
        pad_lower = pad - pad_upper
        return pad_upper, pad_lower
    def _remove_padding(self, tensor):
        # Zero pads are safe here: `h - 0` keeps the full extent.
        tensor_h, tensor_w = tensor.shape[:2]
        cropped = tensor[self._pads.top:tensor_h - self._pads.bottom,
                         self._pads.left:tensor_w - self._pads.right, :]
        return cropped
class NormalizeTensor(EvalTransform):
    """In-place channel-wise normalization: (x - mean) / std."""
    def __init__(self, mean, std, device):
        super().__init__()
        # Shaped (1, 3, 1, 1) so the statistics broadcast over batch/space.
        self.mean = torch.as_tensor(mean).reshape(1, 3, 1, 1).to(device)
        self.std = torch.as_tensor(std).reshape(1, 3, 1, 1).to(device)

    def transform(self, image, mask):
        # Mutates `image` in place and returns the same tensor object.
        image -= self.mean
        image /= self.std
        return image, mask

    def inv_transform(self, image):
        image *= self.std
        image += self.mean
        return image
class ToTensor(EvalTransform):
    """Convert an HWC image and its mask into batched model-space tensors."""
    def __init__(self, device):
        super().__init__()
        self.device = device

    def transform(self, image, mask):
        # HWC -> 1xCxHxW in [0, 1]; mask -> 1x1xHxW.
        image_t = torch.as_tensor(image, device=self.device, dtype=torch.float32)
        mask_t = torch.as_tensor(mask, device=self.device)
        image_t = image_t.unsqueeze(0).permute(0, 3, 1, 2) / 255.0
        mask_t = mask_t.unsqueeze(0).unsqueeze(0)
        return image_t, mask_t

    def inv_transform(self, image):
        # 1xCxHxW in [0, 1] -> HWC in [0, 255]; squeeze is kept in place to
        # match the original's mutation of the passed tensor.
        image.squeeze_(0)
        return 255 * image.permute(1, 2, 0)
class AddFlippedTensor(EvalTransform):
    """Test-time horizontal-flip augmentation.

    `transform` batches the input with its width-flipped copy;
    `inv_transform` un-flips the second prediction and averages the pair.
    """
    def transform(self, image, mask):
        batched_image = torch.cat((image, torch.flip(image, dims=(3,))), dim=0)
        batched_mask = torch.cat((mask, torch.flip(mask, dims=(3,))), dim=0)
        return batched_image, batched_mask

    def inv_transform(self, image):
        return 0.5 * (image[:1] + torch.flip(image[1:], dims=(3,)))
| 3,462 | 31.980952 | 114 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/scripts/evaluate_flow.py | import os
import cv2
import numpy as np
import torch
import sys
sys.path.insert(0, '.')
from flownet import *
from flownet.resample2d_package.resample2d import Resample2d
import os
import time
import argparse
from skimage.measure import compare_mse as mse
from iharm.data.transforms import HCompose, LongestMaxSizeIfLarger
from albumentations import Resize, NoOp
import argparse
crop_size = (256, 256)
val_augmentator = HCompose([Resize(*crop_size)])
def parse_args():
    """Parse the dataset/result directory paths for flow-based evaluation."""
    parser = argparse.ArgumentParser()
    # All four options share the same spec: optional string path, default None.
    for flag in ('--dataset_path', '--dataset_path_next',
                 '--cur_result', '--next_result'):
        parser.add_argument(flag, type=str, default=None, help='')
    return parser.parse_args()
# Flow-warping setup: FlowNet2 predicts optical flow between consecutive
# frames; Resample2d warps a frame with that flow (CUDA required).
args = parse_args()
args.rgb_max = 255.0
args.fp16 = False
net = FlowNet2(args, requires_grad=False)
checkpoint = torch.load("./flownet/FlowNet2_checkpoint.pth.tar")
net.load_state_dict(checkpoint['state_dict'])
net=net.cuda()
flow_warp = Resample2d()
flow_warp=flow_warp.cuda()
tasks = []
#cur_dir = '/new_data/result_rain_8_8'
next_tar_dir = args.dataset_path_next
#cur_dir = '/new_data/result_issam'
cur_dir = args.cur_result
next_dir = args.next_result
cur_tar_dir = args.dataset_path
# NOTE(review): these tuples are immediately shadowed by the tensor versions
# below and are effectively dead.
mean = (0.485 * 255, 0.456 * 255, 0.406 * 255)
std = (0.229 * 255, 0.224 * 255, 0.225 * 255)
# ImageNet mean/std in the 0-255 range, shaped for NCHW broadcasting.
mean = torch.tensor([.485*255, .456*255, .406*255], dtype=torch.float32).view(1, 3, 1, 1).cuda(
)
std = torch.tensor([.229*255, .224*255, .225*255], dtype=torch.float32).view(1, 3, 1, 1).cuda()
# Restrict evaluation to the task ids listed in tl_task.txt.
final_tasks = set([])
f = open('tl_task.txt', 'r')
for line in f.readlines():
    line = line.strip()
    final_tasks.add(line)
def save_image2(bgr_image, result_name):
    """Save a CHW float tensor in [0, 1] as a JPEG without RGB->BGR swap.

    Unlike `save_image`, the tensor is assumed to already be in the channel
    order that `cv2.imwrite` expects (BGR).
    """
    # Bug fix: torch.clamp is out-of-place; the clamped result was previously
    # discarded, so out-of-range values wrapped when cast to uint8.
    bgr_image = torch.clamp(bgr_image, 0, 1)
    bgr_image = bgr_image.detach().cpu().numpy() * 255
    bgr_image = bgr_image.astype(np.uint8)
    bgr_image = np.transpose(bgr_image, (1, 2, 0))  # CHW -> HWC
    # NOTE(review): after the transpose, shape[0] is the image height, so this
    # single-channel check looks like it should test shape[2] — confirm intent.
    if bgr_image.shape[0] == 1:
        cv2.imwrite(
            result_name,
            bgr_image,
            [cv2.IMWRITE_JPEG_QUALITY, 85]
        )
        return
    cv2.imwrite(
        result_name,
        bgr_image,
        [cv2.IMWRITE_JPEG_QUALITY, 85]
    )
def save_image(bgr_image, result_name):
    """Save a CHW float tensor in [0, 1] as a JPEG, converting RGB -> BGR for OpenCV.

    Single-channel images are written directly, since cv2.cvtColor would
    reject them for the RGB2BGR conversion.
    """
    # Bug fix: torch.clamp is not in-place — the clamped result was discarded.
    bgr_image = torch.clamp(bgr_image, 0, 1)
    bgr_image = bgr_image.detach().cpu().numpy() * 255
    bgr_image = bgr_image.astype(np.uint8)
    bgr_image = np.transpose(bgr_image, (1, 2, 0))  # CHW -> HWC
    # Bug fix: after the transpose the channel count is shape[2]; the old
    # shape[0] == 1 test compared the image *height* instead.
    if bgr_image.shape[2] == 1:
        cv2.imwrite(
            result_name,
            bgr_image,
            [cv2.IMWRITE_JPEG_QUALITY, 85]
        )
        return
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(
        result_name,
        rgb_image,
        [cv2.IMWRITE_JPEG_QUALITY, 85]
    )
# ---- Temporal-consistency (TL) evaluation loop ----
# For each listed frame pair (t, t+1): warp frame t's prediction to t+1 using
# FlowNet2 flow, build an occlusion-aware foreground weight, and accumulate
# flow-weighted MSE for both the harmonized results and the raw inputs.
with open('./test_frames.txt', 'r') as f:
    for line in f.readlines():
        line = line.strip()
        tasks.append(line)
total_fmse = 0
total_ori_fmse = 0
t1 = time.time()
or_tls = []
max_ori = 0
count = 0
for i, task in enumerate(tasks):
    task = task.strip()
    if task not in final_tasks:
        continue
    count += 1
    if i % 100 == 0:
        t2 =time.time()
        print(i, t2-t1)
        t1 = time.time()
    # Task line format: "<image path> <mask path>"; derive current/next names.
    video, obj, img_num = task.split()[1].split('/')[-3:]
    cur_name = video + '_' + obj + '_' + img_num[:-3] + 'npy'
    cur_name_without_obj = video + '_' + img_num[:-3] + 'npy'
    next_name = video + '_' + obj + '_' + '%05d' % (int(img_num[:-4]) + 1) + '.npy'
    next_name_without_obj = video + '_' + '%05d' % (int(img_num[:-4]) + 1) + '.npy'
    cur_target_name = os.path.join(cur_tar_dir, task.split()[0])
    # BGR -> RGB, HWC -> CHW, add batch dim; values stay in [0, 255] for FlowNet2.
    cur_original_pic = torch.from_numpy(val_augmentator(image=cv2.imread(cur_target_name))["image"][:, :, ::-1].transpose(2, 0, 1).copy()).cuda().unsqueeze(0).float()
    used_cur_original_pic = cur_original_pic / 255
    pre, obj, num = task.split()[0].split('/')
    num = '%05d' % (int(num[:-4]) + 1) + num[-4:]
    next_tar_name = os.path.join(next_tar_dir, pre + '/' + obj + '/' + num)
    assert os.path.exists(next_tar_name)
    next_original_pic = torch.from_numpy(val_augmentator(image=cv2.imread(next_tar_name))["image"][:, :, ::-1].transpose(2, 0, 1).copy()).cuda().unsqueeze(0).float()
    used_next_original_pic = next_original_pic / 255
    # Precomputed network outputs for both frames, saved as .npy tensors.
    cur_tensor_name = os.path.join(cur_dir, cur_name)
    cur_tensor_name = cur_tensor_name[:-4] + '.npy'
    cur_pic = torch.from_numpy(np.load(cur_tensor_name)).cuda().float()
    next_tensor_name = os.path.join(next_dir, next_name)
    next_tensor_name = next_tensor_name[:-4] + '.npy'
    next_pic = torch.from_numpy(np.load(next_tensor_name)).cuda().float()
    cur_mask = cv2.cvtColor(cv2.imread(os.path.join(cur_tar_dir, task.split()[1])), cv2.COLOR_BGR2RGB)[:, :, 0].astype(np.float32) / 255.
    cur_mask = val_augmentator(object_mask=cur_mask, image=cv2.imread(next_tar_name))['object_mask']
    cur_mask = torch.from_numpy(cur_mask).cuda().unsqueeze(0)
    with torch.no_grad():
        # Flow that maps the current frame onto the next one.
        flow = net(next_original_pic, cur_original_pic)
    cur_mask = torch.reshape(cur_mask, (1, 1, 256, 256))
    warp_cur_tensor = flow_warp(cur_pic, flow)
    ori_warp_cur_tensor = flow_warp(used_cur_original_pic, flow)
    warp_cur_mask = flow_warp(cur_mask, flow)
    # Down-weight pixels where warping disagrees with the real next frame
    # (occlusion / bad flow): weight = exp(-|warped - next|), averaged over RGB.
    dif = torch.exp(-torch.abs(ori_warp_cur_tensor - used_next_original_pic))
    dif = torch.sum(dif, dim = 1)/3
    dif =torch.reshape(dif, (1,1,256,256))
    final_mask = warp_cur_mask *dif
    # Flow-weighted foreground MSE, scaled to the 0-255 intensity range.
    fmse = ((warp_cur_tensor * final_mask - next_pic*final_mask)**2).sum() * 255 * 255 / final_mask.sum()
    fmse_ori = ((ori_warp_cur_tensor * final_mask - used_next_original_pic*final_mask)**2).sum() * 255 * 255 / final_mask.sum()
    total_fmse += fmse
    total_ori_fmse += fmse_ori
print("in total {} pairs, current tl loss is {} and original tl loss is {}".format(len(final_tasks),
    "%.2f" % (float(total_fmse.detach().cpu().numpy()) / len(final_tasks)),
    "%.2f" % (float(total_ori_fmse.detach().cpu().numpy()) / len(final_tasks))))
| 6,396 | 34.148352 | 166 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/scripts/evaluate_model.py | import argparse
import sys
sys.path.insert(0, '.')
import torch
import os
import time
import numpy as np
import cv2
from iharm.utils import pytorch_ssim
from iharm.utils.misc import load_weights
from skimage.measure import compare_mse as mse
from skimage.measure import compare_psnr as psnr
from skimage.measure import compare_ssim as ssim
from tqdm import tqdm
from torch.utils.data import DataLoader
from iharm.data.compose import ComposeDataset, MyPreviousDataset, MyPreviousSequenceDataset
from pathlib import Path
from iharm.inference.transforms import NormalizeTensor, PadToDivisor, ToTensor, AddFlippedTensor
from torchvision import transforms
from iharm.model.base import SSAMvideoLutWithoutDbp, SSAMvideoLut
from albumentations import Resize, NoOp
from iharm.data.hdataset import HDataset
from iharm.data.transforms import HCompose, LongestMaxSizeIfLarger
from iharm.inference.predictor import Predictor
from iharm.inference.evaluation import evaluate_dataset
from iharm.inference.metrics import MetricsHub, MSE, fMSE, PSNR, N, AvgPredictTime
from iharm.inference.utils import load_model, find_checkpoint
from iharm.mconfigs import ALL_MCONFIGS
from iharm.utils.exp import load_config_file
from iharm.utils.log import logger, add_new_file_output_to_logger
# Inverse transform used below to map model tensors back to 0-255 image space.
# NOTE(review): the device index 4 is hard-coded and independent of --gpu.
tmp_transform = [
    ToTensor(torch.device(int(4))),
    #transforms.Normalize(model_cfg.input_normalization['mean'], model_cfg.input_normalization['std']),
]
def _save_image(bgr_image, result_name):
    """Save a CHW float tensor in [0, 1] to disk as a JPEG (quality 85).

    Multi-channel images are converted RGB -> BGR for OpenCV; single-channel
    images are written directly (cv2.cvtColor would reject them).
    """
    bgr_image = torch.clamp(bgr_image, 0, 1)
    bgr_image = bgr_image.detach().cpu().numpy() * 255
    bgr_image = bgr_image.astype(np.uint8)
    bgr_image = np.transpose(bgr_image, (1, 2, 0))  # CHW -> HWC
    # Bug fix: after the transpose the channel count is shape[2]; the old
    # shape[0] == 1 test compared the image *height* instead.
    if bgr_image.shape[2] == 1:
        cv2.imwrite(
            result_name,
            bgr_image,
            [cv2.IMWRITE_JPEG_QUALITY, 85]
        )
        return
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(
        result_name,
        rgb_image,
        [cv2.IMWRITE_JPEG_QUALITY, 85]
    )
# Name -> albumentations resize op; selected via the --resize-strategy flag.
RESIZE_STRATEGIES = {
    'None': NoOp(),
    'LimitLongest1024': LongestMaxSizeIfLarger(1024),
    'Fixed256': Resize(256, 256),
    'Fixed512': Resize(512, 512)
}
def parse_args():
    """Parse evaluation CLI options: model/backbone paths, dataset lists,
    resize strategy, and optional .npy dump directories."""
    p = argparse.ArgumentParser()
    # Input preprocessing / test-time augmentation.
    p.add_argument('--resize-strategy', type=str, choices=RESIZE_STRATEGIES.keys(), default='Fixed256')
    p.add_argument('--use-flip', action='store_true', default=False,
                   help='Use horizontal flip test-time augmentation.')
    p.add_argument('--use_feature', action='store_true', default=False,
                   help='Use horizontal flip test-time augmentation.')
    p.add_argument('--backbone_type', type=str, default='issam',
                   help='Use horizontal flip test-time augmentation.')
    # LUT intermediate results.
    p.add_argument('--write_lut_output', type=str, default="",
                   help='directory of write lut results for training')
    p.add_argument('--write_lut_map', type=str, default="",
                   help='directory of write lut results for training')
    p.add_argument('--normalize_outside', action='store_true', default=False,
                   help='Use horizontal flip test-time augmentation.')
    # Runtime / configuration.
    p.add_argument('--gpu', type=str, default=0, help='ID of used GPU.')
    p.add_argument('--config-path', type=str, default='./config.yml',
                   help='The path to the config file.')
    p.add_argument('--eval-prefix', type=str, default='')
    # Dataset splits.
    p.add_argument('--train_list', type=str, default=None,
                   help='Model weights will be loaded from the specified path if you use this argument.')
    p.add_argument('--val_list', type=str, default=None,
                   help='Model weights will be loaded from the specified path if you use this argument.')
    # Optional .npy dumps of intermediate/final predictions.
    p.add_argument('--write_npy_backbone', action='store_true',
                   help='write npy backbone reult')
    p.add_argument('--write_npy_result', action='store_true',
                   help='write npy final result.')
    p.add_argument('--backbone_npy_dir', type=str, default="")
    p.add_argument('--result_npy_dir', type=str, default="")
    p.add_argument('--dataset_path', type=str, default=None,
                   help='')
    # Temporal context size.
    p.add_argument('--previous_num', type=int, default=1)
    p.add_argument('--future_num', type=int, default=1)
    # Model weights.
    p.add_argument('--backbone', type=str, default="", help='')
    p.add_argument("--checkpoint", type=str, default="", help='')
    return p.parse_args()
def denormalize(tensor):
    """Undo ImageNet-style normalization on an NCHW tensor: x * std + mean."""
    # Column 0 holds the per-channel means, column 1 the per-channel stds.
    stats = torch.tensor([[.485, .229], [.456, .224], [.406, .225]],
                         dtype=torch.float32, device=tensor.device)
    channel_mean = stats[:, 0].view(1, 3, 1, 1)
    channel_std = stats[:, 1].view(1, 3, 1, 1)
    return tensor * channel_std + channel_mean
def main(given_number):
    """Evaluate the video harmonization model on the validation list.

    Computes fMSE/MSE/PSNR/fSSIM for both the backbone output and the refined
    output, and optionally dumps per-frame results as .npy / .jpg files.

    NOTE(review): `given_number` is unused — the temporal context size comes
    from --previous_num/--future_num (see the commented lines below).
    """
    t1 = time.time()
    args = parse_args()
    #checkpoint_path = find_checkpoint(cfg.MODELS_PATH, args.checkpoint)
    previous_number = args.previous_num
    future_number = args.future_num
    #previous_number = given_number
    #future_number = given_number
    crop_size = (256, 256)
    val_augmentator = HCompose([
        Resize(*crop_size)
    ])
    logger.info(vars(args))
    device = torch.device(f'cuda:{args.gpu}')
    #net = load_model(args.model_type, checkpoint_path, verbose=True)
    if args.checkpoint:
        # Full refinement model; weights restored from --checkpoint.
        net = SSAMvideoLut(
            depth=4, backbone_path=args.backbone, with_lutoutput=False, need_normalize=not args.normalize_outside,
            need_denormalize=not args.normalize_outside, use_feature=args.use_feature, backbone_type = args.backbone_type
        )
        load_weights(net, args.checkpoint)
        print("load checkpoint")
    else:
        # LUT-only variant without the trained refinement module.
        # NOTE(review): args.write_dir is not defined by parse_args(); this
        # branch would raise AttributeError — confirm intended usage.
        net = SSAMvideoLutWithoutDbp(
            depth=4, backbone_path = args.backbone, fix_threshold = 0.1, k_threshold = 0.1, use_lutoutput=False,
            write_dir = args.write_dir ,backbone_type = args.backbone_type)
    net.eval()
    net.to(torch.device(int(args.gpu)))
    if args.normalize_outside:
        # Normalization happens here in the dataloader transform.
        mean = [.485, .456, .406]
        std = [.229, .224, .225]
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
    else:
        input_transform = transforms.Compose([
            transforms.ToTensor(),
        ])
    valset = MyPreviousSequenceDataset(
        args.val_list, args.dataset_path, previous_number, future_number,
        augmentator=val_augmentator,
        input_transform=input_transform,
        keep_background_prob=-1, with_previous=True
    )
    batch_size = 32
    valdata = DataLoader(
        valset, batch_size, shuffle=False,
        drop_last=False, pin_memory=False,
        num_workers=8
    )
    # Metric accumulators, summed per frame and divided by `total` at the end.
    original_mse = 0
    original_fmse = 0
    cur_to_tar = 0
    cur_to_iss = 0
    cur_to_iss_f = 0
    backbone_to_tar = 0
    cur_psnr = 0
    total_fore_ground = 0
    backbone_psnr = 0
    fmse_tar = 0
    fmse = 0
    total = 0
    cur_ssim = 0
    backbone_ssim = 0
    total_me = 0
    tbar = tqdm(valdata, ncols=80)
    original_psnr = 0
    invalid_ratio = 0
    start_time = time.time()
    for q, batch_data in enumerate(tbar):
        names = batch_data['name']
        #continue
        batch_data = {k: v.to(torch.device(int(args.gpu))) for k, v in batch_data.items() if k != 'name'}
        images, masks = batch_data['images'], batch_data['masks']
        target_images = batch_data['target_images']
        total += target_images.shape[0]
        previous = {'images': batch_data['pre_images'], 'masks': batch_data['pre_masks']}
        direct_lutoutput = batch_data['lut_output'] if 'lut_output' in batch_data else None
        outs = net(images, masks, previous=previous, names=names, direct_lutoutput=direct_lutoutput, direct_lut_map = None)
        if args.normalize_outside:
            # Metrics are computed in [0, 1] space, so undo the normalization.
            outs['images'] = denormalize(outs['images'])
            target_images = denormalize(target_images)
        predicted_images = outs['images']
        backbone_outs = outs["backbone_out"]
        # Dump backbone outputs for visual inspection.
        # NOTE(review): './try_result/huang/' is hard-coded and must exist.
        for b in range(images.shape[0]):
            video, obj, img_number = names[b].split('/')[-3:]
            write_name = video + '_' + obj + '_' + img_number
            '''
            _save_image(predicted_images[b], './try_result/ours/' + write_name)
            _save_image(backbone_outs[b], './try_result/backbone/' + write_name)
            _save_image(images[b], './try_result/ori/' + write_name)
            _save_image(target_images[b], './try_result/gt/' + write_name)
            '''
            _save_image(backbone_outs[b], './try_result/huang/' + write_name)
        if not args.checkpoint:
            # LUT-only model also reports mapping error and invalid-pixel ratio.
            total_me += outs["me"]
            invalid_ratio += outs["invalid"]
        # Foreground SSIM for refined and backbone outputs.
        for b in range(images.shape[0]):
            ssim_score, fssim_score = pytorch_ssim.ssim(torch.clamp(predicted_images[b:b + 1, :, :, :] * 255, 0, 255),
                                                        torch.clamp(target_images[b:b + 1, :, :, :] * 255, 0, 255),
                                                        window_size=11, mask=masks[b:b + 1, :, :, :])
            cur_ssim += fssim_score
            backbone_ssim_score, backbone_fssim_score = pytorch_ssim.ssim(
                torch.clamp(backbone_outs[b:b + 1, :, :, :] * 255, 0, 255),
                torch.clamp(target_images[b:b + 1, :, :, :] * 255, 0, 255), window_size=11,
                mask=masks[b:b + 1, :, :, :])
            backbone_ssim += backbone_fssim_score
        if args.write_npy_backbone:
            for b in range(images.shape[0]):
                video, obj, img_number = names[b].split('/')[-3:]
                backbone_name = os.path.join(
                    args.backbone_npy_dir,
                    video + '_' + obj + '_' + img_number[:-3] + 'npy')
                np.save(backbone_name, backbone_outs[b:b + 1, :, :, :].detach().cpu().numpy())
        if args.write_npy_result:
            for b in range(images.shape[0]):
                video, obj, img_number = names[b].split('/')[-3:]
                result_name = os.path.join(
                    args.result_npy_dir,
                    video + '_' + obj + '_' + img_number[:-3] + 'npy')
                np.save(result_name, predicted_images[b:b + 1, :, :, :].detach().cpu().numpy())
        # Per-frame MSE / fMSE / PSNR in 0-255 intensity space.
        for i in range(predicted_images.shape[0]):
            mask = masks[i]
            for transform in reversed(tmp_transform):
                with torch.no_grad():
                    image = transform.inv_transform(images[i])
                    predicted_image = transform.inv_transform(predicted_images[i])
                    target_image = transform.inv_transform(target_images[i])
                    backbone_out = transform.inv_transform(backbone_outs[i])
            image = torch.clamp(image, 0, 255)
            image = image.cpu().numpy()
            predicted_image = torch.clamp(predicted_image, 0, 255)
            predicted_image = predicted_image.cpu().numpy()
            target_image = torch.clamp(target_image, 0, 255)
            target_image = target_image.cpu().numpy()
            mask = mask.cpu().numpy()
            mask = mask.astype(np.uint8)
            fore_ground = mask.sum()
            total_fore_ground += fore_ground
            backbone_out = torch.clamp(backbone_out, 0, 255)
            backbone_out = backbone_out.cpu().numpy()
            cur_psnr += psnr(predicted_image, target_image, data_range = predicted_image.max() - predicted_image.min())
            backbone_psnr += psnr(backbone_out, target_image, data_range = backbone_out.max() - backbone_out.min())
            original_psnr += psnr(image, target_image, data_range = image.max() - image.min())
            original_mse += mse(image, target_image)
            # fMSE: MSE rescaled to the foreground area (256*256 / #mask pixels).
            original_fmse += mse(image, target_image) * 256 * 256 / fore_ground
            fmse += mse(backbone_out, target_image) * 256 * 256 / fore_ground
            fmse_tar += mse(predicted_image, target_image) * 256 * 256 / fore_ground
            cur_to_tar += mse(predicted_image, target_image)
            cur_to_iss += mse(predicted_image, backbone_out)
            cur_to_iss_f += mse(predicted_image, backbone_out) * 256 * 256 / fore_ground
            backbone_to_tar += mse(backbone_out, target_image)
    print(previous_number, future_number)
    print("backbone fmse", fmse / total)
    print("backbone mse:", backbone_to_tar/total)
    print("current fmse:", fmse_tar / total)
    print("current mse:", cur_to_tar / total)
    print("backbone psnr:", backbone_psnr / total, "backbone ssim:", backbone_ssim / total)
    print("current psnr:", cur_psnr / total, "current ssim:", cur_ssim / total)
    print("me:", total_me / total)
    print("invalid ratio:", invalid_ratio / total)
    end_time = time.time()
    print("cost:", end_time - start_time)
if __name__ == '__main__':
    # NOTE(review): `number` is forwarded into main() but main() never uses it
    # (the given_number lines are commented out there).
    numbers = [1]
    for number in numbers:
        main(number)
| 13,539 | 37.356941 | 123 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/scripts/my_train.py | import argparse
import sys
sys.path.insert(0, '.')
import torch
import os
from functools import partial
import time
import numpy as np
import cv2
from iharm.utils.misc import load_weights
from skimage.measure import compare_mse as mse
from skimage.measure import compare_psnr as psnr
from skimage.measure import compare_ssim as ssim
from tqdm import tqdm
from torch.utils.data import DataLoader
from iharm.data.compose import ComposeDataset, MyPreviousDataset, MyPreviousSequenceDataset, MyDirectDataset
from pathlib import Path
from iharm.inference.transforms import NormalizeTensor, PadToDivisor, ToTensor, AddFlippedTensor
from torchvision import transforms
from iharm.model.base import SSAMvideoLutWithoutDbp, SSAMvideoLut, SSAMImageHarmonization
from albumentations import Resize, NoOp
from iharm.model import initializer
from iharm.data.transforms import HCompose, LongestMaxSizeIfLarger
from iharm.utils import misc
import math
from iharm.data.hdataset import HDataset
from iharm.data.transforms import HCompose, LongestMaxSizeIfLarger
from iharm.inference.predictor import Predictor
from iharm.inference.evaluation import evaluate_dataset
from iharm.inference.metrics import MetricsHub, MSE, fMSE, PSNR, N, AvgPredictTime
from iharm.inference.utils import load_model, find_checkpoint
from iharm.mconfigs import ALL_MCONFIGS
from iharm.utils.exp import load_config_file
from iharm.utils.log import logger, add_new_file_output_to_logger
#f = open("./refine_without_feature_withoutnoramlized/logs.txt", 'a')
def MaskWeight_MSE(pred, label, mask):
    """Foreground-weighted MSE.

    Per sample, the squared error is summed over all non-batch dimensions and
    normalized by channels * mask area, then averaged over the batch.
    """
    target = label.view(pred.size())
    sum_dims = misc.get_dims_with_exclusion(target.dim(), 0)
    squared_error = torch.sum((pred - target) ** 2, dim=sum_dims)
    normalizer = pred.size(1) * torch.sum(mask, dim=sum_dims)
    return torch.mean(squared_error / normalizer)
def parse_args():
    """Parse training CLI options: dataset lists, backbone settings, LUT result
    directories and optimization hyper-parameters."""
    p = argparse.ArgumentParser()
    # Runtime / configuration.
    p.add_argument('--gpu', type=str, default=0, help='ID of used GPU.')
    p.add_argument('--config-path', type=str, default='./config.yml',
                   help='The path to the config file.')
    p.add_argument('--backbone_type', type=str, default='issam',
                   help='Use horizontal flip test-time augmentation.')
    p.add_argument('--eval-prefix', type=str, default='')
    # Dataset splits and output location.
    p.add_argument('--train_list', type=str, default=None,
                   help='Model weights will be loaded from the specified path if you use this argument.')
    p.add_argument('--val_list', type=str, default=None,
                   help='Model weights will be loaded from the specified path if you use this argument.')
    p.add_argument('--dataset_path', type=str, default=None,
                   help='')
    p.add_argument('--exp_name', type=str, default=None,
                   help='')
    # Temporal context and schedule.
    p.add_argument('--previous_num', type=int, default=1)
    p.add_argument('--future_num', type=int, default=1)
    p.add_argument('--epochs', type=int, default=120)
    # Model options.
    p.add_argument('--backbone', type=str, default="", help='')
    p.add_argument('--use_feature', action='store_true', default=False, help='')
    p.add_argument('--normalize_inside', action='store_true', default=False, help='')
    p.add_argument('--lut_map_dir', default="", help='direct result of lut map')
    p.add_argument('--lut_output_dir', default="", help='direct result of lut output')
    p.add_argument("--checkpoint", type=str, default="", help='')
    return p.parse_args()
def evaluate(net, val_dataloader, args, epoch):
    """Run one validation pass and append the masked-MSE loss to the experiment log."""
    net.eval()
    tbar = tqdm(val_dataloader, ncols=70)
    total_loss = 0.0
    for q, batch_data in enumerate(tbar):
        names = batch_data['name']
        batch_data = {k: v.to(torch.device(int(args.gpu))) for k, v in batch_data.items() if k != 'name'}
        images, masks = batch_data['images'], batch_data['masks']
        target_images = batch_data['target_images']
        if len(args.lut_output_dir) > 0:
            # LUT results were precomputed and loaded by the dataset.
            outs = net(images, masks, previous=None, names=names, direct_lutoutput=batch_data['lut_output'],
                       direct_lut_map=batch_data['lut_map'])
        else:
            # Compute LUT results on the fly from neighbouring frames.
            previous = {'images': batch_data['pre_images'], 'masks': batch_data['pre_masks']}
            outs = net(images, masks, previous=previous, names=names, direct_lutoutput=None, direct_lut_map=None)
        loss = MaskWeight_MSE(outs['images'], target_images, masks)
        total_loss += loss.item()
        if q % 100 == 0:
            print("------------")
            with open("./{}/logs.txt".format(args.exp_name), 'a') as f:
                f.write("val\t"+"epoch {}:\t".format(epoch) + str(q + 1) + '\t' + str(total_loss / (q + 1)) + '\n')
            print("val\t"+"epoch {}:\t".format(epoch) + str(q + 1) + '\t' + str(total_loss / (q + 1)) + '\n')
    # Mean loss rescaled to the 0-255 intensity range.
    print("final loss" , "epoch {}:\t".format(epoch), total_loss / (q+1) * 255 * 255)
    with open("./{}/logs.txt".format(args.exp_name), 'a') as f:
        f.write("val\t" +"epoch {}:\t".format(epoch) + "final loss\t" + str(total_loss / (q+1) * 255 * 255) + '\n')
def main():
    """Train the SSAMvideoLut refinement network on top of a frozen backbone.

    Builds datasets/dataloaders from the CLI options, optimizes with AdamW
    (MultiStep LR decay at epochs 105/115), saves a checkpoint after every
    epoch under ./<exp_name>/, and validates each epoch.
    """
    args = parse_args()
    net = SSAMvideoLut(
        depth=4, backbone_path=args.backbone, with_lutoutput=True,
        need_normalize=args.normalize_inside, need_denormalize=args.normalize_inside,
        use_feature=args.use_feature, backbone_type=args.backbone_type
    )
    net.to(torch.device(int(args.gpu)))
    crop_size = (256, 256)
    augmentator = HCompose([
        Resize(*crop_size)
    ])
    # Build per-parameter groups, honoring optional per-parameter `lr_mult`.
    # Bug fix: `base_lr` was referenced below without ever being defined.
    base_lr = 1e-5
    params = []
    for name, param in net.named_parameters():
        param_group = {'params': [param]}
        if not param.requires_grad:
            print("not grad")
            params.append(param_group)
            continue
        if not math.isclose(getattr(param, 'lr_mult', 1.0), 1.0):
            param_group['lr'] = param_group.get('lr', base_lr) * param.lr_mult
        params.append(param_group)
    optimizer_params = {
        'lr': base_lr,
        'betas': (0.9, 0.999), 'eps': 1e-8
    }
    optimizer = torch.optim.AdamW(params, **optimizer_params)
    net.load_backbone()
    if len(args.checkpoint) > 0:
        load_weights(net, args.checkpoint)
        print("load checkpoint")
    lr_scheduler = partial(torch.optim.lr_scheduler.MultiStepLR,
                           milestones=[105, 115], gamma=0.1)(optimizer=optimizer)
    if args.normalize_inside:
        # The model normalizes internally; feed raw [0, 1] tensors.
        input_transform = transforms.Compose([
            transforms.ToTensor(),
        ])
    else:
        print('normal outside')
        mean = [.485, .456, .406]
        std = [.229, .224, .225]
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
    previous_number = args.previous_num
    future_number = args.future_num
    if len(args.lut_map_dir) > 0:
        # LUT outputs/maps were precomputed and are loaded from disk.
        valset = MyDirectDataset(args.val_list, args.dataset_path, backbone_type=args.backbone_type,
                                 input_transform=input_transform,
                                 augmentator=augmentator, lut_map_dir=args.lut_map_dir, lut_output_dir=args.lut_output_dir)
        trainset = MyDirectDataset(args.train_list, args.dataset_path, backbone_type=args.backbone_type,
                                   input_transform=input_transform,
                                   augmentator=augmentator, lut_map_dir=args.lut_map_dir, lut_output_dir=args.lut_output_dir)
    else:
        # Bug fix: this branch referenced the undefined name `val_augmentator`
        # (the local is called `augmentator`) and the non-existent option
        # `args.trainset` (the CLI flag is --train_list).
        valset = MyPreviousSequenceDataset(
            args.val_list, args.dataset_path, previous_number, future_number,
            augmentator=augmentator,
            input_transform=input_transform,
            keep_background_prob=-1, with_previous=True
        )
        trainset = MyPreviousSequenceDataset(
            args.train_list, args.dataset_path, previous_number, future_number,
            augmentator=augmentator,
            input_transform=input_transform,
            keep_background_prob=-1, with_previous=True
        )
    batch_size = 32
    val_dataloader = DataLoader(
        valset, batch_size, shuffle=False,
        drop_last=False, pin_memory=True,
        num_workers=8
    )
    train_dataloader = DataLoader(
        trainset, batch_size, shuffle=True,
        drop_last=False, pin_memory=True,
        num_workers=8
    )
    if not os.path.exists(args.exp_name):
        os.mkdir(args.exp_name)
    for epoch in range(args.epochs):
        net.train()
        tbar = tqdm(train_dataloader, ncols=70)
        total_loss = 0.0
        for q, batch_data in enumerate(tbar):
            names = batch_data['name']
            batch_data = {k: v.to(torch.device(int(args.gpu))) for k, v in batch_data.items() if k != 'name'}
            images, masks = batch_data['images'], batch_data['masks']
            target_images = batch_data['target_images']
            if len(args.lut_output_dir) > 0:
                outs = net(images, masks, previous=None, names=names, direct_lutoutput=batch_data['lut_output'], direct_lut_map=batch_data['lut_map'])
            else:
                previous = {'images': batch_data['pre_images'], 'masks': batch_data['pre_masks']}
                outs = net(images, masks, previous=previous, names=names, direct_lutoutput=None, direct_lut_map=None)
            loss = MaskWeight_MSE(outs['images'], target_images, masks)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            if q % 100 == 0:
                print("------------")
                with open("./{}/logs.txt".format(args.exp_name), 'a') as f:
                    f.write("train\t" + "epoch {}:\t".format(epoch) + str(q+1) + '\t' + str(total_loss / (q+1)) + '\n')
                print("train\t" + "epoch {}:\t".format(epoch) + str(q+1) + '\t' + str(total_loss / (q+1)) + '\n')
        # Bug fix: the scheduler was stepped once per *batch*, but the
        # milestones [105, 115] are epoch indices (epochs defaults to 120);
        # step once per epoch instead.
        lr_scheduler.step()
        print("epoch {}:".format(epoch), total_loss)
        torch.save(net.state_dict(), './{}/{}.pth'.format(args.exp_name, epoch+1))
        torch.save(net.state_dict(), './{}/last_checkpoint.pth'.format(args.exp_name))
        evaluate(net, val_dataloader, args, epoch)
main() | 10,799 | 43.081633 | 152 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/engine/simple_trainer.py | import os
import logging
from copy import deepcopy
from collections import defaultdict
import cv2
import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torchvision.transforms import Normalize
from iharm.inference.transforms import NormalizeTensor, PadToDivisor, ToTensor, AddFlippedTensor
from iharm.utils.log import logger, TqdmToLogger, SummaryWriterAvg
from iharm.utils.misc import save_checkpoint, load_weights
from .optimizer import get_optimizer
# Inverse transform used by the debug image-saving helper below.
# NOTE(review): the device index 1 is hard-coded here.
tmp_transform = [
    ToTensor(torch.device(1)),
    #transforms.Normalize(model_cfg.input_normalization['mean'], model_cfg.input_normalization['std']),
]
def _save_image2(result_name, bgr_image):
    """Debug helper: invert `tmp_transform`, convert RGB -> BGR and save as JPEG."""
    for transform in tmp_transform:
        bgr_image = transform.inv_transform(bgr_image)
    # Removed leftover debug prints of the full tensor and its max value,
    # which flooded stdout on every call.
    bgr_image = bgr_image.cpu().numpy().astype(np.uint8)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_RGB2BGR)
    cv2.imwrite(
        result_name,
        rgb_image,
        [cv2.IMWRITE_JPEG_QUALITY, 85]
    )
class SimpleHTrainer(object):
    def __init__(self, model, cfg, model_cfg, loss_cfg,
                 trainset, valset,
                 optimizer='adam',
                 optimizer_params=None,
                 image_dump_interval=200,
                 checkpoint_interval=10,
                 tb_dump_period=25,
                 max_interactive_points=0,
                 lr_scheduler=None,
                 metrics=None,
                 additional_val_metrics=None,
                 net_inputs=('images', 'points'),
                 with_previous=False):
        """Wire together model, dataloaders, optimizer, scheduler and logging.

        :param model: network to train.
        :param cfg: experiment config (device, batch sizes, paths, weights, ...).
        :param model_cfg: model-specific config, kept for reference.
        :param loss_cfg: dict-like mapping '<name>_loss' -> criterion and
            '<name>_loss_weight' -> float weight.
        :param with_previous: if True, batches carry neighbouring frames that
            are forwarded to the network as temporal context.
        """
        self.cfg = cfg
        self.model_cfg = model_cfg
        self.max_interactive_points = max_interactive_points
        self.loss_cfg = loss_cfg
        # Validation keeps an independent copy of the loss configuration.
        self.val_loss_cfg = deepcopy(loss_cfg)
        self.tb_dump_period = tb_dump_period
        self.net_inputs = net_inputs
        self.with_previous = with_previous
        if metrics is None:
            metrics = []
        self.train_metrics = metrics
        self.val_metrics = deepcopy(metrics)
        if additional_val_metrics is not None:
            self.val_metrics.extend(additional_val_metrics)
        self.checkpoint_interval = checkpoint_interval
        self.image_dump_interval = image_dump_interval
        self.task_prefix = ''
        # SummaryWriter is created lazily on the first training/validation epoch.
        self.sw = None
        self.trainset = trainset
        self.valset = valset
        self.train_data = DataLoader(
            trainset, cfg.batch_size, shuffle=False,
            drop_last=False, pin_memory=True,
            num_workers=cfg.workers
        )
        self.val_data = DataLoader(
            valset, cfg.val_batch_size, shuffle=False,
            drop_last=False, pin_memory=True,
            num_workers=cfg.workers
        )
        self.optim = get_optimizer(model, optimizer, optimizer_params)
        logger.info(model)
        self.device = cfg.device
        self.net = model
        self._load_weights()
        if cfg.multi_gpu:
            # NOTE(review): _CustomDP is not among this module's visible
            # imports — confirm it is provided elsewhere.
            self.net = _CustomDP(self.net, device_ids=cfg.gpu_ids, output_device=cfg.gpu_ids[0])
        self.net = self.net.to(self.device)
        self.lr = optimizer_params['lr']
        if lr_scheduler is not None:
            self.lr_scheduler = lr_scheduler(optimizer=self.optim)
            if cfg.start_epoch > 0:
                # Fast-forward the scheduler when resuming from a later epoch.
                for _ in range(cfg.start_epoch):
                    self.lr_scheduler.step()
        else:
            self.lr_scheduler = None
        self.tqdm_out = TqdmToLogger(logger, level=logging.INFO)
        if cfg.input_normalization:
            mean = torch.tensor(cfg.input_normalization['mean'], dtype=torch.float32)
            std = torch.tensor(cfg.input_normalization['std'], dtype=torch.float32)
            # Inverse of Normalize(mean, std); used when saving visualizations.
            self.denormalizator = Normalize((-mean / std), (1.0 / std))
        else:
            self.denormalizator = lambda x: x
    def training(self, epoch):
        """Run one training epoch.

        Forward/backward every batch, log losses and metrics to TensorBoard,
        periodically dump visualizations, and save checkpoints (every epoch to
        the rolling file, every `checkpoint_interval` epochs to a numbered one).
        """
        if self.sw is None:
            self.sw = SummaryWriterAvg(log_dir=str(self.cfg.LOGS_PATH),
                                       flush_secs=10, dump_period=self.tb_dump_period)
        log_prefix = 'Train' + self.task_prefix.capitalize()
        tbar = tqdm(self.train_data, file=self.tqdm_out, ncols=100)
        train_loss = 0.0
        for metric in self.train_metrics:
            metric.reset_epoch_stats()
        self.net.train()
        for i, batch_data in enumerate(tbar):
            global_step = epoch * len(self.train_data) + i
            #target_image = batch_data['target_images'][0]
            #_save_image2("./target_img.jpg", target_image)
            loss, losses_logging, splitted_batch_data, outputs = \
                self.batch_forward(batch_data)
            self.optim.zero_grad()
            loss.backward()
            self.optim.step()
            batch_loss = loss.item()
            train_loss += batch_loss
            for loss_name, loss_values in losses_logging.items():
                self.sw.add_scalar(tag=f'{log_prefix}Losses/{loss_name}',
                                   value=np.array(loss_values).mean(),
                                   global_step=global_step)
            self.sw.add_scalar(tag=f'{log_prefix}Losses/overall',
                               value=batch_loss,
                               global_step=global_step)
            # Let active loss objects dump their own internal state as well.
            for k, v in self.loss_cfg.items():
                if '_loss' in k and hasattr(v, 'log_states') and self.loss_cfg.get(k + '_weight', 0.0) > 0:
                    v.log_states(self.sw, f'{log_prefix}Losses/{k}', global_step)
            if self.image_dump_interval > 0 and global_step % self.image_dump_interval == 0:
                with torch.no_grad():
                    self.save_visualization(splitted_batch_data, outputs, global_step, prefix='train')
            self.sw.add_scalar(tag=f'{log_prefix}States/learning_rate',
                               value=self.lr if self.lr_scheduler is None else self.lr_scheduler.get_lr()[-1],
                               global_step=global_step)
            tbar.set_description(f'Epoch {epoch}, training loss {train_loss/(i+1):.6f}')
            for metric in self.train_metrics:
                metric.log_states(self.sw, f'{log_prefix}Metrics/{metric.name}', global_step)
        for metric in self.train_metrics:
            self.sw.add_scalar(tag=f'{log_prefix}Metrics/epoch_{metric.name}',
                               value=metric.get_epoch_value(),
                               global_step=epoch, disable_avg=True)
        # Rolling "latest" checkpoint every epoch ...
        save_checkpoint(self.net, self.cfg.CHECKPOINTS_PATH, prefix=self.task_prefix,
                        epoch=None, multi_gpu=self.cfg.multi_gpu)
        # ... plus a numbered snapshot every `checkpoint_interval` epochs.
        if epoch % self.checkpoint_interval == 0:
            save_checkpoint(self.net, self.cfg.CHECKPOINTS_PATH, prefix=self.task_prefix,
                            epoch=epoch, multi_gpu=self.cfg.multi_gpu)
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
    def validation(self, epoch):
        """Run one validation epoch: accumulate losses/metrics without gradient
        updates and log epoch-level averages to TensorBoard."""
        if self.sw is None:
            self.sw = SummaryWriterAvg(log_dir=str(self.cfg.LOGS_PATH),
                                       flush_secs=10, dump_period=self.tb_dump_period)
        log_prefix = 'Val' + self.task_prefix.capitalize()
        tbar = tqdm(self.val_data, file=self.tqdm_out, ncols=100)
        for metric in self.val_metrics:
            metric.reset_epoch_stats()
        num_batches = 0
        val_loss = 0
        losses_logging = defaultdict(list)
        self.net.eval()
        for i, batch_data in enumerate(tbar):
            global_step = epoch * len(self.val_data) + i
            loss, batch_losses_logging, splitted_batch_data, outputs = \
                self.batch_forward(batch_data, validation=True)
            for loss_name, loss_values in batch_losses_logging.items():
                losses_logging[loss_name].extend(loss_values)
            batch_loss = loss.item()
            val_loss += batch_loss
            num_batches += 1
            #print("batch_loss:",batch_loss)
            print(val_loss / num_batches)
            tbar.set_description(f'Epoch {epoch}, validation loss: {val_loss/num_batches:.6f}')
            for metric in self.val_metrics:
                metric.log_states(self.sw, f'{log_prefix}Metrics/{metric.name}', global_step)
        print("final loss:", val_loss / num_batches)
        for loss_name, loss_values in losses_logging.items():
            self.sw.add_scalar(tag=f'{log_prefix}Losses/{loss_name}', value=np.array(loss_values).mean(),
                               global_step=epoch, disable_avg=True)
        for metric in self.val_metrics:
            self.sw.add_scalar(tag=f'{log_prefix}Metrics/epoch_{metric.name}', value=metric.get_epoch_value(),
                               global_step=epoch, disable_avg=True)
        self.sw.add_scalar(tag=f'{log_prefix}Losses/overall', value=val_loss / num_batches,
                           global_step=epoch, disable_avg=True)
    def batch_forward(self, batch_data, validation=False):
        """Forward one batch, compute the pixel loss and update metrics.

        Returns (loss, per-loss logging dict, device-moved batch, model outputs).
        Gradients are enabled only when `validation` is False.
        """
        metrics = self.val_metrics if validation else self.train_metrics
        losses_logging = defaultdict(list)
        with torch.set_grad_enabled(not validation):
            # NOTE(review): `names` is extracted but never used in this method.
            names = batch_data['name']
            batch_data = {k: v.to(self.device) for k, v in batch_data.items() if k != 'name'}
            if 'lut_output' not in batch_data:
                print('compute here')
                images, masks = batch_data['images'], batch_data['masks']
                if self.with_previous:
                    # Neighbouring frames provide temporal context.
                    previous = {'images': batch_data['pre_images'], 'masks': batch_data['pre_masks']}
                    output = self.net(images, masks, previous=previous)
                else:
                    output = self.net(images, masks)
            else:
                # Precomputed LUT output shipped with the batch.
                images, masks, lut_output = batch_data['images'], batch_data['masks'], batch_data['lut_output']
                output = self.net(images, masks, lut_output = lut_output)
            loss = 0.0
            loss = self.add_loss('pixel_loss', loss, losses_logging, validation, output, batch_data)
            with torch.no_grad():
                for metric in metrics:
                    metric.update(
                        *(output.get(x) for x in metric.pred_outputs),
                        *(batch_data[x] for x in metric.gt_outputs)
                    )
        return loss, losses_logging, batch_data, output
def add_loss(self, loss_name, total_loss, losses_logging, validation, net_outputs, batch_data):
loss_cfg = self.loss_cfg if not validation else self.val_loss_cfg
loss_weight = loss_cfg.get(loss_name + '_weight', 0.0)
if loss_weight > 0.0:
loss_criterion = loss_cfg.get(loss_name)
loss = loss_criterion(*(net_outputs.get(x) for x in loss_criterion.pred_outputs),
*(batch_data[x] for x in loss_criterion.gt_outputs))
loss = torch.mean(loss)
losses_logging[loss_name].append(loss.item())
loss = loss_weight * loss
total_loss = total_loss + loss
return total_loss
def save_visualization(self, splitted_batch_data, outputs, global_step, prefix):
output_images_path = self.cfg.VIS_PATH / prefix
if self.task_prefix:
output_images_path /= self.task_prefix
if not output_images_path.exists():
output_images_path.mkdir(parents=True)
image_name_prefix = f'{global_step:06d}'
def _save_image(suffix, image):
cv2.imwrite(
str(output_images_path / f'{image_name_prefix}_{suffix}.jpg'),
image,
[cv2.IMWRITE_JPEG_QUALITY, 85]
)
images = splitted_batch_data['images']
target_images = splitted_batch_data['target_images']
object_masks = splitted_batch_data['masks']
image, target_image, object_mask = images[0], target_images[0], object_masks[0, 0]
image = (self.denormalizator(image).cpu().numpy() * 255).transpose((1, 2, 0))
target_image = (self.denormalizator(target_image).cpu().numpy() * 255).transpose((1, 2, 0))
object_mask = np.repeat((object_mask.cpu().numpy() * 255)[:, :, np.newaxis], axis=2, repeats=3)
predicted_image = (self.denormalizator(outputs['images'].detach()[0]).cpu().numpy() * 255).transpose((1, 2, 0))
predicted_image = np.clip(predicted_image, 0, 255)
viz_image = np.hstack((image, object_mask, target_image, predicted_image)).astype(np.uint8)
_save_image('reconstruction', viz_image[:, :, ::-1])
def _load_weights(self):
if self.cfg.weights is not None:
if os.path.isfile(self.cfg.weights):
load_weights(self.net, self.cfg.weights, verbose=True)
print("load weight here")
self.cfg.weights = None
else:
raise RuntimeError(f"=> no checkpoint found at '{self.cfg.weights}'")
elif self.cfg.resume_exp is not None:
checkpoints = list(self.cfg.CHECKPOINTS_PATH.glob(f'{self.cfg.resume_prefix}*.pth'))
assert len(checkpoints) == 1
checkpoint_path = checkpoints[0]
load_weights(self.net, str(checkpoint_path), verbose=True)
self.net = self.net.to(self.device)
class _CustomDP(torch.nn.DataParallel):
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
class SimpleHTrainer_tc_with_lutoutput(object):
    """Trainer for a harmonization network with an extra temporal-consistency
    (tc) pixel loss computed against a precomputed LUT output.

    Same overall layout as the plain trainer above, but ``batch_forward``
    returns (total, target, tc) losses and the progress bars report all three.
    """
    def __init__(self, model, cfg, model_cfg, loss_cfg,
                 trainset, valset,
                 optimizer='adam',
                 optimizer_params=None,
                 image_dump_interval=200,
                 checkpoint_interval=10,
                 tb_dump_period=25,
                 max_interactive_points=0,
                 lr_scheduler=None,
                 metrics=None,
                 additional_val_metrics=None,
                 net_inputs=('images', 'points'),
                 with_previous=False):
        self.cfg = cfg
        self.model_cfg = model_cfg
        self.max_interactive_points = max_interactive_points
        self.loss_cfg = loss_cfg
        # Validation gets its own deep copy so stateful criteria don't mix.
        self.val_loss_cfg = deepcopy(loss_cfg)
        self.tb_dump_period = tb_dump_period
        self.net_inputs = net_inputs
        self.with_previous = with_previous
        if metrics is None:
            metrics = []
        self.train_metrics = metrics
        self.val_metrics = deepcopy(metrics)
        if additional_val_metrics is not None:
            self.val_metrics.extend(additional_val_metrics)
        self.checkpoint_interval = checkpoint_interval
        self.image_dump_interval = image_dump_interval
        self.task_prefix = ''
        self.sw = None  # SummaryWriter is created lazily on first training/validation call
        self.trainset = trainset
        self.valset = valset
        self.train_data = DataLoader(
            trainset, cfg.batch_size, shuffle=True,
            drop_last=True, pin_memory=True,
            num_workers=cfg.workers
        )
        self.val_data = DataLoader(
            valset, cfg.val_batch_size, shuffle=False,
            drop_last=False, pin_memory=True,
            num_workers=cfg.workers
        )
        self.optim = get_optimizer(model, optimizer, optimizer_params)
        logger.info(model)
        self.device = cfg.device
        self.net = model
        # Load pretrained/resume weights (also moves the net to the device).
        self._load_weights()
        if cfg.multi_gpu:
            self.net = _CustomDP(self.net, device_ids=cfg.gpu_ids, output_device=cfg.gpu_ids[0])
            self.net = self.net.to(self.device)
        self.lr = optimizer_params['lr']
        if lr_scheduler is not None:
            self.lr_scheduler = lr_scheduler(optimizer=self.optim)
            # Fast-forward the scheduler when resuming from a later epoch.
            if cfg.start_epoch > 0:
                for _ in range(cfg.start_epoch):
                    self.lr_scheduler.step()
        else:
            self.lr_scheduler = None
        self.tqdm_out = TqdmToLogger(logger, level=logging.INFO)
        if cfg.input_normalization:
            # Inverse of Normalize(mean, std): x -> x * std + mean.
            mean = torch.tensor(cfg.input_normalization['mean'], dtype=torch.float32)
            std = torch.tensor(cfg.input_normalization['std'], dtype=torch.float32)
            self.denormalizator = Normalize((-mean / std), (1.0 / std))
        else:
            self.denormalizator = lambda x: x
    def training(self, epoch):
        """Run one training epoch: forward/backward per batch, TensorBoard
        logging, periodic visualization dumps, and checkpointing."""
        if self.sw is None:
            self.sw = SummaryWriterAvg(log_dir=str(self.cfg.LOGS_PATH),
                                       flush_secs=10, dump_period=self.tb_dump_period)
        log_prefix = 'Train' + self.task_prefix.capitalize()
        tbar = tqdm(self.train_data, file=self.tqdm_out, ncols=100)
        train_loss = 0.0
        tc_loss = 0.0
        tar_loss = 0.0
        for metric in self.train_metrics:
            metric.reset_epoch_stats()
        self.net.train()
        for i, batch_data in enumerate(tbar):
            global_step = epoch * len(self.train_data) + i
            # target_image = batch_data['target_images'][0]
            # _save_image2("./target_img.jpg", target_image)
            total_loss, loss_tar, loss_tc, losses_logging, splitted_batch_data, outputs = \
                self.batch_forward(batch_data)
            self.optim.zero_grad()
            total_loss.backward()
            self.optim.step()
            batch_loss = total_loss.item()
            train_loss += batch_loss
            tc_loss += loss_tc.item()
            tar_loss += loss_tar.item()
            for loss_name, loss_values in losses_logging.items():
                self.sw.add_scalar(tag=f'{log_prefix}Losses/{loss_name}',
                                   value=np.array(loss_values).mean(),
                                   global_step=global_step)
            self.sw.add_scalar(tag=f'{log_prefix}Losses/overall',
                               value=batch_loss,
                               global_step=global_step)
            # Let stateful loss criteria emit their own TB scalars.
            for k, v in self.loss_cfg.items():
                if '_loss' in k and hasattr(v, 'log_states') and self.loss_cfg.get(k + '_weight', 0.0) > 0:
                    v.log_states(self.sw, f'{log_prefix}Losses/{k}', global_step)
            if self.image_dump_interval > 0 and global_step % self.image_dump_interval == 0:
                with torch.no_grad():
                    self.save_visualization(splitted_batch_data, outputs, global_step, prefix='train')
            self.sw.add_scalar(tag=f'{log_prefix}States/learning_rate',
                               value=self.lr if self.lr_scheduler is None else self.lr_scheduler.get_lr()[-1],
                               global_step=global_step)
            tbar.set_description(f'Epoch {epoch}, training loss {train_loss / (i + 1):.6f}, tar {tar_loss / (i + 1):.6f}, tc {tc_loss / (i + 1):.6f}')
            for metric in self.train_metrics:
                metric.log_states(self.sw, f'{log_prefix}Metrics/{metric.name}', global_step)
        for metric in self.train_metrics:
            self.sw.add_scalar(tag=f'{log_prefix}Metrics/epoch_{metric.name}',
                               value=metric.get_epoch_value(),
                               global_step=epoch, disable_avg=True)
        # Always refresh the rolling "last" checkpoint; numbered ones at intervals.
        save_checkpoint(self.net, self.cfg.CHECKPOINTS_PATH, prefix=self.task_prefix,
                        epoch=None, multi_gpu=self.cfg.multi_gpu)
        if epoch % self.checkpoint_interval == 0:
            save_checkpoint(self.net, self.cfg.CHECKPOINTS_PATH, prefix=self.task_prefix,
                            epoch=epoch, multi_gpu=self.cfg.multi_gpu)
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
    def validation(self, epoch):
        """Run one validation epoch without gradients and log epoch averages."""
        if self.sw is None:
            self.sw = SummaryWriterAvg(log_dir=str(self.cfg.LOGS_PATH),
                                       flush_secs=10, dump_period=self.tb_dump_period)
        log_prefix = 'Val' + self.task_prefix.capitalize()
        tbar = tqdm(self.val_data, file=self.tqdm_out, ncols=100)
        for metric in self.val_metrics:
            metric.reset_epoch_stats()
        num_batches = 0
        val_loss = 0
        losses_logging = defaultdict(list)
        val_tc_loss = 0
        val_tar_loss = 0
        self.net.eval()
        for i, batch_data in enumerate(tbar):
            global_step = epoch * len(self.val_data) + i
            total_loss, loss_tar, loss_tc, batch_losses_logging, splitted_batch_data, outputs = \
                self.batch_forward(batch_data, validation=True)
            for loss_name, loss_values in batch_losses_logging.items():
                losses_logging[loss_name].extend(loss_values)
            batch_loss = total_loss.item()
            val_loss += batch_loss
            val_tc_loss += loss_tc.item()
            val_tar_loss += loss_tar.item()
            num_batches += 1
            # print("batch_loss:",batch_loss)
            print(val_loss / num_batches)
            tbar.set_description(f'Epoch {epoch}, validation loss: {val_loss / num_batches:.6f} val tc loss:{val_tc_loss / num_batches:.6f} val tar loss:{val_tar_loss / num_batches:.6f}')
            for metric in self.val_metrics:
                metric.log_states(self.sw, f'{log_prefix}Metrics/{metric.name}', global_step)
        print("final loss:", val_loss / num_batches)
        for loss_name, loss_values in losses_logging.items():
            self.sw.add_scalar(tag=f'{log_prefix}Losses/{loss_name}', value=np.array(loss_values).mean(),
                               global_step=epoch, disable_avg=True)
        for metric in self.val_metrics:
            self.sw.add_scalar(tag=f'{log_prefix}Metrics/epoch_{metric.name}', value=metric.get_epoch_value(),
                               global_step=epoch, disable_avg=True)
        self.sw.add_scalar(tag=f'{log_prefix}Losses/overall', value=val_loss / num_batches,
                           global_step=epoch, disable_avg=True)
    def batch_forward(self, batch_data, validation=False):
        """Forward the network and compute both loss terms.

        Returns:
            (total_loss, loss, loss_tc, losses_logging, batch_data, output)
            where ``loss`` is the target pixel loss, ``loss_tc`` the
            temporal-consistency loss against the LUT output, and
            ``total_loss`` their sum.
        """
        metrics = self.val_metrics if validation else self.train_metrics
        losses_logging = defaultdict(list)
        with torch.set_grad_enabled(not validation):
            names = batch_data['name']
            batch_data = {k: v.to(self.device) for k, v in batch_data.items() if k != 'name'}
            if 'lut_output' not in batch_data:
                print('compute here')
                images, masks = batch_data['images'], batch_data['masks']
                if self.with_previous:
                    previous = {'images': batch_data['pre_images'], 'masks': batch_data['pre_masks']}
                    output = self.net(images, masks, previous=previous, names = names)
                else:
                    output = self.net(images, masks)
            else:
                images, masks, lut_output = batch_data['images'], batch_data['masks'], batch_data['lut_output']
                output = self.net(images, masks, lut_output=lut_output, names = names)
            loss = 0.0
            loss = self.add_loss('pixel_loss', loss, losses_logging, validation, output, batch_data)
            loss_tc = 0.0
            loss_tc = self.add_loss('tc_pixel_loss',loss_tc, losses_logging, validation, output, batch_data)
            total_loss = loss + loss_tc
            with torch.no_grad():
                for metric in metrics:
                    metric.update(
                        *(output.get(x) for x in metric.pred_outputs),
                        *(batch_data[x] for x in metric.gt_outputs)
                    )
        return total_loss, loss, loss_tc, losses_logging, batch_data, output
    def add_loss(self, loss_name, total_loss, losses_logging, validation, net_outputs, batch_data):
        """Accumulate one named, weighted loss term onto ``total_loss``.

        ``tc_pixel_loss`` has a special calling convention: it compares the
        network's prediction with the LUT output inside the mask.
        """
        loss_cfg = self.loss_cfg if not validation else self.val_loss_cfg
        loss_weight = loss_cfg.get(loss_name + '_weight', 0.0)
        if loss_weight > 0.0:
            loss_criterion = loss_cfg.get(loss_name)
            if loss_name == 'tc_pixel_loss':
                loss = loss_criterion(net_outputs['images'], net_outputs['lut_output'], batch_data['masks'])
            else:
                loss = loss_criterion(*(net_outputs.get(x) for x in loss_criterion.pred_outputs),
                                      *(batch_data[x] for x in loss_criterion.gt_outputs))
            loss = torch.mean(loss)
            losses_logging[loss_name].append(loss.item())
            loss = loss_weight * loss
            total_loss = total_loss + loss
        return total_loss
    def save_visualization(self, splitted_batch_data, outputs, global_step, prefix):
        """Write a side-by-side JPEG (input | mask | target | prediction) of the
        first sample of the batch under ``cfg.VIS_PATH/prefix[/task_prefix]``."""
        output_images_path = self.cfg.VIS_PATH / prefix
        if self.task_prefix:
            output_images_path /= self.task_prefix
        if not output_images_path.exists():
            output_images_path.mkdir(parents=True)
        image_name_prefix = f'{global_step:06d}'
        def _save_image(suffix, image):
            cv2.imwrite(
                str(output_images_path / f'{image_name_prefix}_{suffix}.jpg'),
                image,
                [cv2.IMWRITE_JPEG_QUALITY, 85]
            )
        images = splitted_batch_data['images']
        target_images = splitted_batch_data['target_images']
        object_masks = splitted_batch_data['masks']
        image, target_image, object_mask = images[0], target_images[0], object_masks[0, 0]
        # Undo input normalization and go CHW float -> HWC [0, 255].
        image = (self.denormalizator(image).cpu().numpy() * 255).transpose((1, 2, 0))
        target_image = (self.denormalizator(target_image).cpu().numpy() * 255).transpose((1, 2, 0))
        object_mask = np.repeat((object_mask.cpu().numpy() * 255)[:, :, np.newaxis], axis=2, repeats=3)
        predicted_image = (self.denormalizator(outputs['images'].detach()[0]).cpu().numpy() * 255).transpose((1, 2, 0))
        predicted_image = np.clip(predicted_image, 0, 255)
        viz_image = np.hstack((image, object_mask, target_image, predicted_image)).astype(np.uint8)
        # [:, :, ::-1] flips RGB -> BGR for cv2.imwrite.
        _save_image('reconstruction', viz_image[:, :, ::-1])
    def _load_weights(self):
        """Initialize ``self.net`` from ``cfg.weights`` or a resumed experiment's
        single checkpoint, then move the net to the device."""
        if self.cfg.weights is not None:
            if os.path.isfile(self.cfg.weights):
                load_weights(self.net, self.cfg.weights, verbose=True)
                print("load weight here")
                self.cfg.weights = None
            else:
                raise RuntimeError(f"=> no checkpoint found at '{self.cfg.weights}'")
        elif self.cfg.resume_exp is not None:
            checkpoints = list(self.cfg.CHECKPOINTS_PATH.glob(f'{self.cfg.resume_prefix}*.pth'))
            assert len(checkpoints) == 1
            checkpoint_path = checkpoints[0]
            load_weights(self.net, str(checkpoint_path), verbose=True)
        self.net = self.net.to(self.device)
| 26,698 | 42.413008 | 187 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/engine/optimizer.py | import torch
import math
from iharm.utils.log import logger
def get_optimizer(model, opt_name, opt_kwargs):
    """Build a torch optimizer with one parameter group per named parameter.

    Trainable parameters carrying an ``lr_mult`` attribute get their learning
    rate scaled by it; everything else inherits the defaults in ``opt_kwargs``.

    Args:
        model: module whose ``named_parameters()`` are optimized.
        opt_name: one of 'sgd', 'adam', 'adamw' (case-insensitive).
        opt_kwargs: optimizer keyword arguments; must contain 'lr'.
    """
    default_lr = opt_kwargs['lr']
    param_groups = []
    for name, parameter in model.named_parameters():
        group = {'params': [parameter]}
        if parameter.requires_grad:
            lr_mult = getattr(parameter, 'lr_mult', 1.0)
            if not math.isclose(lr_mult, 1.0):
                logger.info(f'Applied lr_mult={parameter.lr_mult} to "{name}" parameter.')
                group['lr'] = group.get('lr', default_lr) * lr_mult
        param_groups.append(group)
    optimizer_classes = {
        'sgd': torch.optim.SGD,
        'adam': torch.optim.Adam,
        'adamw': torch.optim.AdamW
    }
    return optimizer_classes[opt_name.lower()](param_groups, **opt_kwargs)
| 797 | 27.5 | 82 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/utils/pytorch_ssim.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import os
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size`` normalized to sum to 1."""
    center = window_size // 2
    weights = torch.Tensor([exp(-((i - center) ** 2) / float(2 * sigma ** 2))
                            for i in range(window_size)])
    return weights / weights.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, ws, ws) Gaussian window (sigma=1.5) for grouped conv2d.

    The 2-D kernel is the outer product of a normalized 1-D Gaussian with
    itself, expanded (shared) across all channels.
    """
    sigma = 1.5
    center = window_size // 2
    kernel_1d = torch.Tensor([exp(-((i - center) ** 2) / float(2 * sigma ** 2))
                              for i in range(window_size)])
    kernel_1d = (kernel_1d / kernel_1d.sum()).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average = True, mask=None):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if mask is not None:
mask_sum = mask.sum()
fg_ssim_map = ssim_map*mask
fg_ssim_map_sum = fg_ssim_map.sum(3).sum(2)
fg_ssim = fg_ssim_map_sum/mask_sum
fg_ssim_mu = fg_ssim.mean()
ssim_mu = ssim_map.mean()
return ssim_mu.item(), fg_ssim_mu.item()
# if size_average:
# return ssim_map.mean()
# else:
# return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """SSIM metric as a module, caching the Gaussian window between calls.

    The window is rebuilt (and moved to the input's device/dtype) whenever
    the channel count or tensor type of the input changes.
    """

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        channel = img1.size(1)
        cache_valid = (channel == self.channel
                       and self.window.data.type() == img1.data.type())
        if not cache_valid:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, self.window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True, mask=None):
    """Functional SSIM: build a Gaussian window matching img1 and delegate to _ssim."""
    (_, channel, _, _) = img1.size()
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
        window = window.type_as(img1)
    # NOTE(review): type_as only runs on the CUDA path here (inside the `if`),
    # unlike SSIM.forward — presumably fine since the window is created as
    # float; confirm for half-precision inputs.
    return _ssim(img1, img2, window, window_size, channel, size_average, mask) | 2,979 | 34.47619 | 104 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/utils/exp.py | import os
import sys
import shutil
import pprint
from pathlib import Path
from datetime import datetime
import yaml
import torch
from easydict import EasyDict as edict
from .log import logger, add_new_file_output_to_logger
def init_experiment(args):
    """Create (or resume) an experiment directory tree for a model script and
    return the merged configuration.

    Resolves the experiment path under ``cfg.EXPS_PATH`` from the model's
    location below the repository's 'models' directory, creates
    checkpoints/vis/logs subdirectories, snapshots the model script, sets up
    the GPU list and file logging.
    """
    model_path = Path(args.model_path)
    ftree = get_model_family_tree(model_path)
    if ftree is None:
        print('Models can only be located in the "models" directory in the root of the repository')
        sys.exit(1)
    cfg = load_config(model_path)
    update_config(cfg, args)
    experiments_path = Path(cfg.EXPS_PATH)
    exp_parent_path = experiments_path / '/'.join(ftree)
    exp_parent_path.mkdir(parents=True, exist_ok=True)
    if cfg.resume_exp:
        exp_path = find_resume_exp(exp_parent_path, cfg.resume_exp)
    else:
        # New experiment: next free 3-digit index, optionally suffixed by a name.
        last_exp_indx = find_last_exp_indx(exp_parent_path)
        exp_name = f'{last_exp_indx:03d}'
        if cfg.exp_name:
            exp_name += '_' + cfg.exp_name
        exp_path = exp_parent_path / exp_name
        exp_path.mkdir(parents=True)
    cfg.EXP_PATH = exp_path
    cfg.CHECKPOINTS_PATH = exp_path / 'checkpoints'
    cfg.VIS_PATH = exp_path / 'vis'
    cfg.LOGS_PATH = exp_path / 'logs'
    cfg.LOGS_PATH.mkdir(exist_ok=True)
    cfg.CHECKPOINTS_PATH.mkdir(exist_ok=True)
    cfg.VIS_PATH.mkdir(exist_ok=True)
    # Snapshot the model script next to the experiment for reproducibility.
    dst_script_path = exp_path / (model_path.stem + datetime.strftime(datetime.today(), '_%Y-%m-%d-%H-%M-%S.py'))
    shutil.copy(model_path, dst_script_path)
    print(cfg.ngpus)
    # Either an explicit comma-separated GPU list or the first cfg.ngpus devices.
    if cfg.gpus != '':
        gpu_ids = [int(id) for id in cfg.gpus.split(',')]
    else:
        gpu_ids = list(range(cfg.ngpus))
    cfg.gpus = ','.join([str(id) for id in gpu_ids])
    cfg.gpu_ids = gpu_ids
    cfg.ngpus = len(gpu_ids)
    cfg.multi_gpu = cfg.ngpus > 1
    if cfg.multi_gpu:
        #os.environ['CUDA_VISIBLE_DEVICES'] = cfg.gpus
        ngpus = torch.cuda.device_count()
        #assert ngpus == cfg.ngpus
    # NOTE(review): assumes CUDA is available — cfg.device is always a cuda device.
    cfg.device = torch.device(f'cuda:{cfg.gpu_ids[0]}')
    add_new_file_output_to_logger(cfg.LOGS_PATH, prefix='train_')
    logger.info(f'Number of GPUs: {len(cfg.gpu_ids)}')
    logger.info('Run experiment with config:')
    logger.info(pprint.pformat(cfg, indent=4))
    return cfg
def get_model_family_tree(model_path, terminate_name='models'):
    """Return the path components from just below ``terminate_name`` down to the
    model's stem, or None when ``terminate_name`` is not among the ancestors."""
    lineage = [model_path.stem]
    for ancestor in model_path.parents:
        if ancestor.stem == terminate_name:
            # Found the terminating directory: emit root-to-leaf order.
            return lineage[::-1]
        lineage.append(ancestor.stem)
    return None
def find_last_exp_indx(exp_parent_path):
    """Return the next free 3-digit experiment index inside ``exp_parent_path``.

    Only directories whose names start with three digits are considered;
    the result is one past the largest such prefix (0 when none exist).
    """
    next_indx = 0
    for child in exp_parent_path.iterdir():
        if child.is_dir() and child.stem[:3].isnumeric():
            next_indx = max(next_indx, int(child.stem[:3]) + 1)
    return next_indx
def find_resume_exp(exp_parent_path, exp_pattern):
    """Return the unique experiment directory matching ``exp_pattern*``.

    Exits the process (status 1) when zero or more than one candidate exists.
    """
    matches = sorted(exp_parent_path.glob(f'{exp_pattern}*'))
    if not matches:
        print(f'No experiments could be found that satisfies the pattern = "*{exp_pattern}"')
        sys.exit(1)
    if len(matches) > 1:
        print('More than one experiment found:')
        for candidate in matches:
            print(candidate)
        sys.exit(1)
    exp_path = matches[0]
    print(f'Continue with experiment "{exp_path}"')
    return exp_path
def update_config(cfg, args):
    """Copy attributes from ``args`` into ``cfg`` unless ``cfg`` already defines
    them under the lower- or upper-case form of the name."""
    for key, val in vars(args).items():
        already_present = key.lower() in cfg or key.upper() in cfg
        if not already_present:
            cfg[key] = val
def load_config(model_path):
    """Load the model's own .yml config (if any), then overlay 'config.yml'
    files found while walking up from the model directory to the CWD.

    Closer (more specific) configs win: parent values are only added for
    keys not already present. Returns an EasyDict.
    """
    model_name = model_path.stem
    own_config = model_path.parent / (model_name + '.yml')
    cfg = load_config_file(own_config) if own_config.exists() else dict()
    cwd = Path.cwd()
    current_dir = own_config.parent.absolute()
    while len(current_dir.parents) > 0:
        candidate = current_dir / 'config.yml'
        if candidate.exists():
            parent_cfg = load_config_file(candidate, model_name=model_name)
            # Only fill keys the more specific configs have not set yet.
            cfg.update({k: v for k, v in parent_cfg.items() if k not in cfg})
        if current_dir.absolute() == cwd:
            break
        current_dir = current_dir.parent
    return edict(cfg)
def load_config_file(config_path, model_name=None, return_edict=False):
    """Parse a YAML config; merge the model-specific SUBCONFIGS entry (if any)
    into the top level, then drop the SUBCONFIGS section entirely."""
    with open(config_path, 'r') as f:
        cfg = yaml.safe_load(f)
    subconfigs = cfg.pop('SUBCONFIGS', None)
    if subconfigs and model_name is not None and model_name in subconfigs:
        cfg.update(subconfigs[model_name])
    return edict(cfg) if return_edict else cfg
| 4,654 | 28.09375 | 113 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/utils/misc.py | import torch
from .log import logger
def get_dims_with_exclusion(dim, exclude=None):
    """Return [0, dim) as a list, with ``exclude`` removed when given.

    Raises:
        ValueError: when ``exclude`` is not None and not in range(dim)
        (list.remove semantics, as before).
    """
    all_dims = [*range(dim)]
    if exclude is None:
        return all_dims
    all_dims.remove(exclude)
    return all_dims
def save_checkpoint(net, checkpoints_path, epoch=None, prefix='', verbose=True, multi_gpu=False):
    """Serialize the network's state dict under ``checkpoints_path``.

    The file is named '<epoch:03d>.pth' (or 'last_checkpoint.pth' when epoch
    is None), optionally prefixed with '<prefix>_'. With multi_gpu, the
    wrapped ``net.module`` is saved instead of the DataParallel shell.
    """
    base_name = 'last_checkpoint.pth' if epoch is None else f'{epoch:03d}.pth'
    if prefix:
        base_name = f'{prefix}_{base_name}'
    checkpoints_path.mkdir(parents=True, exist_ok=True)
    target_path = checkpoints_path / base_name
    if verbose:
        logger.info(f'Save checkpoint to {str(target_path)}')
    weights = net.module.state_dict() if multi_gpu else net.state_dict()
    torch.save(weights, str(target_path))
def load_weights(model, path_to_weights, verbose=True):
    """Load a (possibly partial) checkpoint into ``model``.

    The checkpoint is merged into the model's current state dict, so keys
    missing from the file keep their current values.
    """
    if verbose:
        logger.info(f'Load checkpoint from path: {path_to_weights}')
    merged_state = model.state_dict()
    merged_state.update(torch.load(str(path_to_weights), map_location='cpu'))
    model.load_state_dict(merged_state)
| 1,191 | 27.380952 | 97 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/utils/log.py | import io
import time
import logging
from datetime import datetime
import numpy as np
from torch.utils.tensorboard import SummaryWriter
# Name of the shared application logger and the timestamp format for its records.
LOGGER_NAME = 'root'
LOGGER_DATEFMT = '%Y-%m-%d %H:%M:%S'
# Module-level logger used across the project: INFO level, echoed via a stream handler.
handler = logging.StreamHandler()
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
def add_new_file_output_to_logger(logs_path, prefix, only_message=False):
    """Attach a timestamped FileHandler under ``logs_path`` to the module logger.

    The log file is named '<prefix><YYYY-mm-dd_HH-MM-SS>.log'; with
    ``only_message`` the records carry no level/timestamp decoration.
    """
    timestamp = datetime.strftime(datetime.today(), '%Y-%m-%d_%H-%M-%S')
    logs_path.mkdir(exist_ok=True, parents=True)
    file_handler = logging.FileHandler(str(logs_path / (prefix + timestamp + '.log')))
    if only_message:
        fmt = '%(message)s'
    else:
        fmt = '(%(levelname)s) %(asctime)s: %(message)s'
    file_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=LOGGER_DATEFMT))
    logger.addHandler(file_handler)
class TqdmToLogger(io.StringIO):
    """File-like sink that forwards tqdm's status line to a logger, rate-limited.

    tqdm writes its progress line here; ``flush`` emits it at most once per
    ``mininterval`` seconds.
    """
    logger = None
    level = None
    buf = ''

    def __init__(self, logger, level=None, mininterval=5):
        super(TqdmToLogger, self).__init__()
        self.logger = logger
        self.level = level or logging.INFO
        self.mininterval = mininterval
        self.last_time = 0

    def write(self, buf):
        # Keep only the payload; tqdm pads with \r, newlines and spaces.
        self.buf = buf.strip('\r\n\t ')

    def flush(self):
        throttled = time.time() - self.last_time <= self.mininterval
        if self.buf and not throttled:
            self.logger.log(self.level, self.buf)
            self.last_time = time.time()
class SummaryWriterAvg(SummaryWriter):
    """SummaryWriter that averages each scalar tag over ``dump_period`` calls
    before actually writing it (unless averaging is disabled per call)."""

    def __init__(self, *args, dump_period=20, **kwargs):
        super().__init__(*args, **kwargs)
        self._dump_period = dump_period
        self._avg_scalars = dict()

    def add_scalar(self, tag, value, global_step=None, disable_avg=False):
        if disable_avg or isinstance(value, (tuple, list, dict)):
            # Write through immediately; sequences are never averaged.
            super().add_scalar(tag, np.array(value), global_step=global_step)
            return
        accumulator = self._avg_scalars.setdefault(tag, ScalarAccumulator(self._dump_period))
        accumulator.add(value)
        if accumulator.is_full():
            super().add_scalar(tag, accumulator.value, global_step=global_step)
            accumulator.reset()
class ScalarAccumulator(object):
    """Running mean of scalar values over a fixed-size period."""

    def __init__(self, period):
        self.period = period
        self.sum = 0
        self.cnt = 0

    def add(self, value):
        self.cnt += 1
        self.sum += value

    @property
    def value(self):
        # Mean of the values seen since the last reset; 0 when empty.
        return self.sum / self.cnt if self.cnt > 0 else 0

    def reset(self):
        self.sum = 0
        self.cnt = 0

    def is_full(self):
        return self.cnt >= self.period

    def __len__(self):
        return self.cnt
| 2,809 | 27.1 | 89 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/data/base.py | import random
import numpy as np
import torch
import cv2
import os
class BaseHDataset(torch.utils.data.dataset.Dataset):
    """Base dataset for image-harmonization samples.

    Subclasses implement ``get_sample(index)`` returning a dict with at least
    'name', 'image' (uint8 HWC), 'object_mask' and 'target_image' (uint8 HWC).
    When ``with_previous`` is set, the sample additionally carries
    'pre_image'/'pre_object_mask' lists and an 'image_id'.
    """

    def __init__(self,
                 augmentator=None,
                 input_transform=None,
                 keep_background_prob=0.0,
                 with_image_info=False,
                 epoch_len=-1,
                 with_previous=False):
        super(BaseHDataset, self).__init__()
        self.epoch_len = epoch_len
        self.augmentator = augmentator
        self.keep_background_prob = keep_background_prob
        self.with_image_info = with_image_info
        self.with_previous = with_previous
        # Default to the identity transform so __getitem__ can apply it blindly.
        if input_transform is None:
            input_transform = lambda x: x
        self.input_transform = input_transform
        self.dataset_samples = None

    def __getitem__(self, index):
        # With a fixed epoch length, draw a random sample instead of `index`.
        if self.epoch_len > 0:
            index = random.randrange(0, len(self.dataset_samples))
        sample = self.get_sample(index)
        self.check_sample_types(sample)
        if self.with_previous:
            output = self._get_item_with_previous(sample)
        else:
            # BUG FIX: this branch previously referenced undefined locals
            # (`new_img`, `image`, `obj_mask`, `target_image`) and then
            # unconditionally ran torch.stack on an empty list, so every
            # with_previous=False lookup raised.
            sample = self.augment_sample(sample)
            output = {
                'name': sample['name'],
                'images': self.input_transform(sample['image']),
                'masks': sample['object_mask'].astype(np.float32)[np.newaxis, ...],
                'target_images': self.input_transform(sample['target_image']),
            }
        if self.with_image_info and 'image_id' in sample:
            output['image_info'] = sample['image_id']
        return output

    def _get_item_with_previous(self, sample):
        """Augment the sample once per previous frame and pack the result.

        NOTE(review): assumes at least one previous frame is present — with an
        empty 'pre_image' list the current frame would stay un-augmented and
        torch.stack would fail (as in the original code).
        """
        pre_imgs = []
        pre_masks = []
        for pre_idx in range(len(sample['pre_image'])):
            augmented = self.augment_sample({
                'image': sample['image'],
                'object_mask': sample['object_mask'],
                'target_image': sample['target_image'],
                'image_id': sample['image_id'],
                'pre_image': sample['pre_image'][pre_idx],
                'pre_object_mask': sample['pre_object_mask'][pre_idx],
            })
            pre_imgs.append(self.input_transform(augmented['pre_image']))
            pre_masks.append(augmented['pre_object_mask'].astype(np.float32)[np.newaxis, ...])
            # The current frame is re-augmented alongside every previous one;
            # the last augmentation wins (matches the original behavior).
            new_img = augmented['image']
            new_object_mask = augmented['object_mask']
            new_target_img = augmented['target_image']
        return {
            'name': sample['name'],
            'images': self.input_transform(new_img),
            'masks': new_object_mask.astype(np.float32)[np.newaxis, ...],
            'target_images': self.input_transform(new_target_img),
            'pre_images': torch.stack(pre_imgs, dim=0),
            'pre_masks': np.array(pre_masks),
        }

    def check_sample_types(self, sample):
        # Raw inputs must arrive as uint8 images; transforms handle conversion.
        assert sample['image'].dtype == 'uint8'
        if 'target_image' in sample:
            assert sample['target_image'].dtype == 'uint8'

    def augment_sample(self, sample):
        """Apply self.augmentator to the sample (and its additional targets),
        retrying until check_augmented_sample accepts the crop."""
        if self.augmentator is None:
            return sample
        additional_targets = {target_name: sample[target_name]
                              for target_name in self.augmentator.additional_targets.keys()}
        valid_augmentation = False
        while not valid_augmentation:
            aug_output = self.augmentator(image=sample['image'], **additional_targets)
            valid_augmentation = self.check_augmented_sample(sample, aug_output)
        for target_name, transformed_target in aug_output.items():
            sample[target_name] = transformed_target
        return sample

    def check_augmented_sample(self, sample, aug_output):
        # Occasionally accept background-only crops (keep_background_prob);
        # otherwise require a non-trivial object mask.
        if self.keep_background_prob < 0.0 or random.random() < self.keep_background_prob:
            return True
        return aug_output['object_mask'].sum() > 1.0

    def get_sample(self, index):
        raise NotImplementedError

    def __len__(self):
        return self.epoch_len if self.epoch_len > 0 else len(self.dataset_samples)
| 4,758 | 36.179688 | 119 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/data/compose.py | from .base import BaseHDataset
import cv2
import numpy as np
import copy
import os
import time
import torch
class MyDirectDataset:
    """Evaluation dataset pairing composite frames with precomputed LUT
    outputs/maps stored as .npy files.

    Each line of ``val_list`` holds "target mask composite" paths relative to
    ``dataset_path`` (Windows separators are normalized). LUT artifacts are
    looked up as '<video>_<object>_<frame>.npy' in the given directories.
    """
    def __init__(self, val_list, dataset_path, backbone_type = 'issam', input_transform=None, augmentator=None, lut_map_dir='', lut_output_dir=''):
        start_time = time.time()  # NOTE(review): measured but never reported
        self.tasks = []
        self.dataset_path = dataset_path
        self.input_transform = input_transform
        self.backbone_type = backbone_type
        self.augmentator = augmentator
        self.lut_map_dir = lut_map_dir
        self.lut_output_dir = lut_output_dir
        with open(val_list, 'r') as f:
            for line in f.readlines():
                tar_name, mask_name, cur_name = line.split()
                # Normalize Windows-style separators before joining.
                tar_name = tar_name.replace('\\', '/')
                mask_name = mask_name.replace('\\', '/')
                cur_name = cur_name.replace('\\', '/')
                cur_name = os.path.join(self.dataset_path, cur_name)
                mask_name = os.path.join(self.dataset_path, mask_name)
                tar_name = os.path.join(self.dataset_path, tar_name)
                self.tasks.append([tar_name, mask_name, cur_name])
    def __getitem__(self, index):
        sample = {}
        tar_name, mask_name, cur_name = self.tasks[index]
        # Augment then flip BGR->RGB; the dict-in/dict-out call looks
        # albumentations-style — confirm against the caller.
        cur_img = self.augmentator(image=cv2.imread(cur_name))["image"][:, :, ::-1].copy()
        cur_mask = cv2.cvtColor(cv2.imread(mask_name), cv2.COLOR_BGR2RGB)[:, :, 0].astype(np.float32) / 255.
        # NOTE(review): the mask is augmented together with the target image
        # (read a second time), presumably so both get the same spatial
        # transform — verify this is intended.
        cur_mask = self.augmentator(object_mask=cur_mask, image=cv2.imread(tar_name))['object_mask']
        tar_img = self.augmentator(image=cv2.imread(tar_name))["image"][:, :, ::-1].copy()
        video, obj, img_number = cur_name.split('/')[-3:]
        lut_output_name = os.path.join(self.lut_output_dir, video + '_' + obj + '_' + img_number[:-4] + '.npy')
        lut_map_name = os.path.join(self.lut_map_dir, video + '_' + obj + '_' + img_number[:-4] + '.npy')
        assert os.path.exists(lut_output_name)
        assert os.path.exists(lut_map_name)
        lut_output = np.load(lut_output_name)
        lut_map = np.load(lut_map_name)
        cur_img = self.input_transform(cur_img)
        tar_img = self.input_transform(tar_img)
        sample['images'] = cur_img
        sample['masks'] = cur_mask[np.newaxis, ...].astype(np.float32)
        sample['target_images'] = tar_img
        sample['name'] = cur_name
        sample['lut_output'] = torch.from_numpy(lut_output)
        sample['lut_map'] = torch.from_numpy(lut_map)
        return sample
    def __len__(self):
        return len(self.tasks)
class MyPreviousSequenceDataset(BaseHDataset):
    def __init__(self, dataset_list, dataset_path, previous_num, future_num, **kwargs):
        """Index samples together with `previous_num` past and `future_num`
        future frame paths.

        Frames are assumed to be numbered with a stride of 5 ('%05d' names);
        when a neighbor frame's mask file does not exist, the nearest
        already-collected neighbor is repeated (falling back to the current
        frame itself).
        """
        super(MyPreviousSequenceDataset, self).__init__(**kwargs)
        self.dataset_path = dataset_path
        self.dataset_samples = []
        self.previous_num = previous_num
        self.future_num = future_num
        with open(dataset_list, 'r') as f:
            for line in f.readlines():
                real_img_name, cur_mask_name, cur_img_name = line.strip().split()
                # Normalize Windows-style separators before joining.
                cur_img_name = cur_img_name.replace('\\', '/')
                cur_mask_name = cur_mask_name.replace('\\', '/')
                real_img_name = real_img_name.replace('\\', '/')
                cur_img_name = os.path.join(self.dataset_path, cur_img_name)
                cur_mask_name = os.path.join(self.dataset_path, cur_mask_name)
                real_img_name = os.path.join(self.dataset_path, real_img_name)
                path, number = os.path.split(cur_img_name)
                mask_path, mask_number = os.path.split(cur_mask_name)
                pre_img_names = []
                pre_mask_names = []
                future_img_names = []
                future_mask_names = []
                # Previous frames: current frame number minus 5*p.
                for p in range(1, previous_num + 1):
                    pre_number = '%05d' % (int(number[:-4])-5 *p) + number[-4:]
                    pre_mask_number = '%05d' % (int(mask_number[:-4]) -5*p) + mask_number[-4:]
                    #print(pre_mask_number)
                    pre_img_name = os.path.join(path, pre_number)
                    pre_mask_name = os.path.join(mask_path, pre_mask_number)
                    if not os.path.exists(pre_mask_name):
                        # Missing neighbor: repeat the nearest collected one,
                        # or the current frame when none exists yet.
                        if len(pre_img_names) > 0:
                            pre_img_name = pre_img_names[-1]
                            pre_mask_name = pre_mask_names[-1]
                        else:
                            pre_img_name = cur_img_name
                            pre_mask_name = cur_mask_name
                    pre_img_names.append(pre_img_name)
                    pre_mask_names.append(pre_mask_name)
                # Future frames: same scheme with plus 5*p.
                for p in range(1, future_num + 1):
                    future_number = '%05d' % (int(number[:-4]) + 5 *p) + number[-4:]
                    future_mask_number = '%05d' % (int(mask_number[:-4]) + 5*p) + mask_number[-4:]
                    #print(pre_mask_number)
                    future_img_name = os.path.join(path, future_number)
                    future_mask_name = os.path.join(mask_path, future_mask_number)
                    if not os.path.exists(future_mask_name):
                        #future_img_name = "no pic"
                        #future_mask_name = "no pic"
                        if len(future_mask_names) > 0:
                            future_img_name = future_img_names[-1]
                            future_mask_name = future_mask_names[-1]
                        else:
                            future_img_name = cur_img_name
                            future_mask_name = cur_mask_name
                    future_img_names.append(future_img_name)
                    future_mask_names.append(future_mask_name)
                self.dataset_samples.append((cur_img_name, cur_mask_name, pre_img_names, pre_mask_names, real_img_name, future_img_names, future_mask_names))
def get_sample(self, index):
cur_img_name, cur_mask_name, pre_img_names, pre_mask_names, real_img_name, future_img_names, future_mask_names = self.dataset_samples[index]
cur_img = cv2.imread(cur_img_name)
cur_img = cv2.cvtColor(cur_img, cv2.COLOR_BGR2RGB)
real_img = cv2.imread(real_img_name)
real_img = cv2.cvtColor(real_img, cv2.COLOR_BGR2RGB)
cur_mask = cv2.imread(cur_mask_name)
cur_mask = cv2.cvtColor(cur_mask, cv2.COLOR_BGR2RGB)
pre_imgs = []
future_imgs = []
pre_masks = []
future_masks = []
for p in range(self.previous_num):
pre_img_name = pre_img_names[p]
pre_mask_name = pre_mask_names[p]
if pre_img_name == "no pic":
if len(pre_imgs) == 0:
pre_img = copy.copy(cur_img)
pre_mask = copy.copy(cur_mask)
pre_mask = pre_mask[:, :, 0].astype(np.float32) / 255.
else:
pre_img = copy.copy(pre_imgs[-1])
pre_mask = copy.copy(pre_masks[-1])
else:
pre_img = cv2.imread(pre_img_name)
pre_img = cv2.cvtColor(pre_img, cv2.COLOR_BGR2RGB)
pre_mask = cv2.imread(pre_mask_name)
pre_mask = pre_mask[:, :, 0].astype(np.float32) / 255.
assert pre_img.shape[0]>0
#print(pre_mask_name, pre_mask.shape, cur_mask.shape, len(pre_masks))
pre_imgs.append(pre_img)
pre_masks.append(pre_mask)
for p in range(self.future_num):
future_img_name = future_img_names[p]
future_mask_name = future_mask_names[p]
if future_img_name == "no pic":
if len(future_imgs) == 0:
future_img = copy.copy(cur_img)
future_mask = copy.copy(cur_mask)
future_mask = future_mask[:, :, 0].astype(np.float32) / 255.
else:
future_img = copy.copy(future_imgs[-1])
future_mask = copy.copy(future_masks[-1])
else:
future_img = cv2.imread(future_img_name)
future_img = cv2.cvtColor(future_img, cv2.COLOR_BGR2RGB)
future_mask = cv2.imread(future_mask_name)
future_mask = future_mask[:, :, 0].astype(np.float32) / 255.
assert future_img.shape[0]>0
future_imgs.append(future_img)
future_masks.append(future_mask)
pre_imgs += future_imgs
pre_masks += future_masks
assert len(pre_imgs) == len(pre_masks)
#print(pre_mask.dtype, pre_img.dtype, cur_img.dtype, pre_img_name)
#pre_imgs = np.array(pre_imgs)
#pre_masks = np.array(pre_masks)
cur_mask = cur_mask[:, :, 0].astype(np.float32) / 255.
return {
'name': cur_img_name,
'image': cur_img,
'object_mask': cur_mask,
'target_image': real_img,
'image_id': index,
'pre_image': pre_imgs,
'pre_object_mask': pre_masks
}
class MyPreviousSequenceDataset_future(BaseHDataset):
    """Variant of MyPreviousSequenceDataset that derives neighbour-frame
    paths by string slicing instead of os.path.split.

    The last 9 characters of each path are assumed to be the file name in
    the form 'NNNNN.ext' (5-digit frame index + 4-character extension).
    Everything else (frame step of 5, fallback to the nearest existing
    neighbour, returned sample layout) matches MyPreviousSequenceDataset.
    """

    def __init__(self, dataset_list, dataset_path, previous_num, future_num, **kwargs):
        super(MyPreviousSequenceDataset_future, self).__init__(**kwargs)
        self.dataset_path = dataset_path
        self.dataset_samples = []
        self.previous_num = previous_num
        self.future_num = future_num
        with open(dataset_list, 'r') as f:
            for line in f.readlines():
                real_img_name, cur_mask_name, cur_img_name = line.strip().split()
                # Normalize Windows-style separators before joining paths.
                cur_img_name = cur_img_name.replace('\\', '/')
                cur_mask_name = cur_mask_name.replace('\\', '/')
                real_img_name = real_img_name.replace('\\', '/')
                cur_img_name = os.path.join(self.dataset_path, cur_img_name)
                cur_mask_name = os.path.join(self.dataset_path, cur_mask_name)
                real_img_name = os.path.join(self.dataset_path, real_img_name)
                #path, number = os.path.split(cur_img_name)
                # Split off the trailing 'NNNNN.ext' (9 chars) by slicing;
                # `path` keeps its trailing separator, so names are rebuilt
                # by plain concatenation below.
                path, number = cur_img_name[:-9], cur_img_name[-9:]
                #mask_path, mask_number = os.path.split(cur_mask_name)
                mask_path, mask_number = cur_mask_name[:-9], cur_mask_name[-9:]
                pre_img_names = []
                pre_mask_names = []
                future_img_names = []
                future_mask_names = []
                # Collect `previous_num` earlier frames, 5 frame numbers apart.
                for p in range(1, previous_num + 1):
                    pre_number = '%05d' % (int(number[:-4])-5 *p) + number[-4:]
                    pre_mask_number = '%05d' % (int(mask_number[:-4]) -5*p) + mask_number[-4:]
                    #print(pre_mask_number)
                    #pre_img_name = os.path.join(path, pre_number)
                    pre_img_name = path + pre_number
                    #pre_mask_name = os.path.join(mask_path, pre_mask_number)
                    pre_mask_name = mask_path + pre_mask_number
                    if not os.path.exists(pre_mask_name):
                        #pre_img_name = "no pic"
                        #pre_mask_name = "no pic"
                        # Missing frame: reuse the closest collected neighbour
                        # or the current frame when none collected yet.
                        if len(pre_img_names) > 0:
                            pre_img_name = pre_img_names[-1]
                            pre_mask_name = pre_mask_names[-1]
                        else:
                            pre_img_name = cur_img_name
                            pre_mask_name = cur_mask_name
                    pre_img_names.append(pre_img_name)
                    pre_mask_names.append(pre_mask_name)
                # Collect `future_num` later frames symmetrically.
                for p in range(1, future_num + 1):
                    future_number = '%05d' % (int(number[:-4]) + 5 *p) + number[-4:]
                    future_mask_number = '%05d' % (int(mask_number[:-4]) + 5*p) + mask_number[-4:]
                    #print(pre_mask_number)
                    #future_img_name = os.path.join(path, future_number)
                    future_img_name = path + future_number
                    #future_mask_name = os.path.join(mask_path, future_mask_number)
                    future_mask_name = mask_path + future_mask_number
                    if not os.path.exists(future_mask_name):
                        #future_img_name = "no pic"
                        #future_mask_name = "no pic"
                        if len(future_mask_names) > 0:
                            future_img_name = future_img_names[-1]
                            future_mask_name = future_mask_names[-1]
                        else:
                            future_img_name = cur_img_name
                            future_mask_name = cur_mask_name
                    future_img_names.append(future_img_name)
                    future_mask_names.append(future_mask_name)
                self.dataset_samples.append((cur_img_name, cur_mask_name, pre_img_names, pre_mask_names, real_img_name, future_img_names, future_mask_names))

    def get_sample(self, index):
        """Load one sample; see MyPreviousSequenceDataset.get_sample for the
        returned dict layout (this method is identical)."""
        cur_img_name, cur_mask_name, pre_img_names, pre_mask_names, real_img_name, future_img_names, future_mask_names = self.dataset_samples[index]
        cur_img = cv2.imread(cur_img_name)
        cur_img = cv2.cvtColor(cur_img, cv2.COLOR_BGR2RGB)
        real_img = cv2.imread(real_img_name)
        real_img = cv2.cvtColor(real_img, cv2.COLOR_BGR2RGB)
        cur_mask = cv2.imread(cur_mask_name)
        cur_mask = cv2.cvtColor(cur_mask, cv2.COLOR_BGR2RGB)
        pre_imgs = []
        future_imgs = []
        pre_masks = []
        future_masks = []
        for p in range(self.previous_num):
            pre_img_name = pre_img_names[p]
            pre_mask_name = pre_mask_names[p]
            # NOTE(review): "no pic" is never produced by this __init__
            # (the placeholder assignments are commented out), so this
            # branch appears to be dead code kept for compatibility.
            if pre_img_name == "no pic":
                """
                img_shape = cur_img.shape
                mask_shape = cur_mask.shape
                pre_img = np.zeros(img_shape)
                pre_mask = np.zeros(mask_shape)
                pre_img = pre_img.astype(np.uint8)
                pre_mask = pre_mask.astype(np.uint8)
                """
                if len(pre_imgs) == 0:
                    pre_img = copy.copy(cur_img)
                    pre_mask = copy.copy(cur_mask)
                    pre_mask = pre_mask[:, :, 0].astype(np.float32) / 255.
                else:
                    pre_img = copy.copy(pre_imgs[-1])
                    pre_mask = copy.copy(pre_masks[-1])
            else:
                pre_img = cv2.imread(pre_img_name)
                pre_img = cv2.cvtColor(pre_img, cv2.COLOR_BGR2RGB)
                pre_mask = cv2.imread(pre_mask_name)
                # Keep a single channel, scaled to [0, 1].
                pre_mask = pre_mask[:, :, 0].astype(np.float32) / 255.
            assert pre_img.shape[0]>0
            #print(pre_mask_name, pre_mask.shape, cur_mask.shape, len(pre_masks))
            pre_imgs.append(pre_img)
            pre_masks.append(pre_mask)
        for p in range(self.future_num):
            future_img_name = future_img_names[p]
            future_mask_name = future_mask_names[p]
            if future_img_name == "no pic":
                """
                img_shape = cur_img.shape
                mask_shape = cur_mask.shape
                pre_img = np.zeros(img_shape)
                pre_mask = np.zeros(mask_shape)
                pre_img = pre_img.astype(np.uint8)
                pre_mask = pre_mask.astype(np.uint8)
                """
                if len(future_imgs) == 0:
                    future_img = copy.copy(cur_img)
                    future_mask = copy.copy(cur_mask)
                    future_mask = future_mask[:, :, 0].astype(np.float32) / 255.
                else:
                    future_img = copy.copy(future_imgs[-1])
                    future_mask = copy.copy(future_masks[-1])
            else:
                future_img = cv2.imread(future_img_name)
                future_img = cv2.cvtColor(future_img, cv2.COLOR_BGR2RGB)
                future_mask = cv2.imread(future_mask_name)
                future_mask = future_mask[:, :, 0].astype(np.float32) / 255.
            assert future_img.shape[0]>0
            future_imgs.append(future_img)
            future_masks.append(future_mask)
        # Returned neighbour order: previous frames first, then future frames.
        pre_imgs += future_imgs
        pre_masks += future_masks
        assert len(pre_imgs) == len(pre_masks)
        #print(pre_mask.dtype, pre_img.dtype, cur_img.dtype, pre_img_name)
        #pre_imgs = np.array(pre_imgs)
        #pre_masks = np.array(pre_masks)
        cur_mask = cur_mask[:, :, 0].astype(np.float32) / 255.
        return {
            'name': cur_img_name,
            'image': cur_img,
            'object_mask': cur_mask,
            'target_image': real_img,
            'image_id': index,
            'pre_image': pre_imgs,
            'pre_object_mask': pre_masks
        }
class MyPreviousDataset(BaseHDataset):
    """Harmonization dataset that pairs each composite frame with a single
    previous frame (5 frame numbers earlier).

    Unlike the sequence variants, a missing previous frame is recorded as
    the placeholder string "no pic" at indexing time and resolved at load
    time by copying the current frame.
    """

    def __init__(self, dataset_list, dataset_path, **kwargs):
        super(MyPreviousDataset, self).__init__(**kwargs)
        self.dataset_path = dataset_path
        # Each element: (cur_img, cur_mask, pre_img, pre_mask, real_img).
        self.dataset_samples = []
        with open(dataset_list, 'r') as f:
            for line in f.readlines():
                real_img_name, cur_mask_name, cur_img_name = line.strip().split()
                # Normalize Windows-style separators before joining paths.
                cur_img_name = cur_img_name.replace('\\', '/')
                cur_mask_name = cur_mask_name.replace('\\', '/')
                real_img_name = real_img_name.replace('\\', '/')
                cur_img_name = os.path.join(self.dataset_path, cur_img_name)
                cur_mask_name = os.path.join(self.dataset_path, cur_mask_name)
                real_img_name = os.path.join(self.dataset_path, real_img_name)
                path, number = os.path.split(cur_img_name)
                mask_path, mask_number = os.path.split(cur_mask_name)
                # File names look like 'NNNNN.ext'; the previous frame is 5 earlier.
                pre_number = '%05d' % (int(number[:-4])-5) + number[-4:]
                pre_mask_number = '%05d' % (int(mask_number[:-4]) -5) + mask_number[-4:]
                pre_img_name = os.path.join(path, pre_number)
                pre_mask_name = os.path.join(mask_path, pre_mask_number)
                if not os.path.exists(pre_mask_name):
                    # No previous frame on disk: mark with a placeholder,
                    # resolved in get_sample by copying the current frame.
                    pre_img_name = "no pic"
                    pre_mask_name = "no pic"
                self.dataset_samples.append((cur_img_name, cur_mask_name, pre_img_name, pre_mask_name, real_img_name))

    def get_sample(self, index):
        """Load one sample: current/previous images, masks, and target image."""
        cur_img_name, cur_mask_name, pre_img_name, pre_mask_name, real_img_name = self.dataset_samples[index]
        cur_img = cv2.imread(cur_img_name)
        cur_img = cv2.cvtColor(cur_img, cv2.COLOR_BGR2RGB)
        real_img = cv2.imread(real_img_name)
        real_img = cv2.cvtColor(real_img, cv2.COLOR_BGR2RGB)
        cur_mask = cv2.imread(cur_mask_name)
        #cur_mask = cv2.cvtColor(cur_mask, cv2.COLOR_BGR2RGB)
        if pre_img_name == "no pic":
            """
            img_shape = cur_img.shape
            mask_shape = cur_mask.shape
            pre_img = np.zeros(img_shape)
            pre_mask = np.zeros(mask_shape)
            pre_img = pre_img.astype(np.uint8)
            pre_mask = pre_mask.astype(np.uint8)
            """
            # No previous frame: fall back to copies of the current frame.
            pre_img = copy.copy(cur_img)
            pre_mask = copy.copy(cur_mask)
        else:
            pre_img = cv2.imread(pre_img_name)
            pre_img = cv2.cvtColor(pre_img, cv2.COLOR_BGR2RGB)
            pre_mask = cv2.imread(pre_mask_name)
            assert pre_img.shape[0]>0
        #print(pre_mask.dtype, pre_img.dtype, cur_img.dtype, pre_img_name)
        # Reduce masks to a single channel scaled to [0, 1].
        cur_mask = cur_mask[:, :, 0].astype(np.float32) / 255.
        pre_mask = pre_mask[:, :, 0].astype(np.float32) / 255.
        return {
            'image': cur_img,
            'object_mask': cur_mask,
            'target_image': real_img,
            'image_id': index,
            'pre_image': pre_img,
            'pre_object_mask': pre_mask
        }
class ComposeDataset(BaseHDataset):
    """Concatenates several datasets behind a single flat index space."""

    def __init__(self, datasets, **kwargs):
        super(ComposeDataset, self).__init__(**kwargs)
        self._datasets = datasets
        # Flat index -> (dataset index, sample index within that dataset).
        self.dataset_samples = [
            (ds_idx, smp_idx)
            for ds_idx, dataset in enumerate(self._datasets)
            for smp_idx in range(len(dataset))
        ]

    def get_sample(self, index):
        """Delegate to the dataset that owns flat index `index`."""
        ds_idx, smp_idx = self.dataset_samples[index]
        return self._datasets[ds_idx].get_sample(smp_idx)
| 20,372 | 45.09276 | 157 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/losses.py | import torch
import torch.nn as nn
from iharm.utils import misc
class Loss(nn.Module):
    """Base class for training losses.

    Records which model outputs (`pred_outputs`) and which ground-truth
    batch entries (`gt_outputs`) the loss consumes, so the training loop
    can dispatch the right tensors to `forward`.
    """

    def __init__(self, pred_outputs, gt_outputs):
        super(Loss, self).__init__()
        self.pred_outputs = pred_outputs
        self.gt_outputs = gt_outputs
class MSE(Loss):
    """Plain mean-squared error between prediction and target image."""

    def __init__(self, pred_name='images', gt_image_name='target_images'):
        super(MSE, self).__init__(pred_outputs=(pred_name,), gt_outputs=(gt_image_name,))

    def forward(self, pred, label):
        label = label.view(pred.size())
        # Average the squared error over every dimension except the batch one,
        # yielding one value per sample.
        squared_err = (pred - label) ** 2
        return torch.mean(squared_err, dim=misc.get_dims_with_exclusion(label.dim(), 0))
class MaskWeightedMSE(Loss):
    """MSE normalized by the (clamped) foreground-mask area instead of the
    full image size, so small composite regions are not under-weighted."""

    def __init__(self, min_area=1000.0, pred_name='images',
                 gt_image_name='target_images', gt_mask_name='masks'):
        super(MaskWeightedMSE, self).__init__(pred_outputs=(pred_name, ),
                                              gt_outputs=(gt_image_name, gt_mask_name))
        self.min_area = min_area

    def forward(self, pred, label, mask):
        label = label.view(pred.size())
        non_batch_dims = misc.get_dims_with_exclusion(label.dim(), 0)
        squared_err = torch.sum((pred - label) ** 2, dim=non_batch_dims)
        # Normalize by channel count times the mask area, never below min_area.
        mask_area = torch.clamp_min(torch.sum(mask, dim=non_batch_dims), self.min_area)
        return squared_err / (pred.size(1) * mask_area)
class MaskWeightedMSE_tc(Loss):
    """Mask-area-weighted MSE (temporal-consistency variant).

    Identical computation to MaskWeightedMSE; only `gt_image_name`
    defaults to 'lut_output' instead of 'target_images'.
    """

    def __init__(self, min_area=1000.0, pred_name='images',
                 gt_image_name='lut_output', gt_mask_name='masks'):
        super(MaskWeightedMSE_tc, self).__init__(pred_outputs=(pred_name, ),
                                              gt_outputs=(gt_image_name, gt_mask_name))
        self.min_area = min_area

    def forward(self, pred, label, mask):
        label = label.view(pred.size())
        reduce_dims = misc.get_dims_with_exclusion(label.dim(), 0)
        loss = (pred - label) ** 2
        # Normalize by channel count times the mask area (clamped to min_area).
        delimeter = pred.size(1) * torch.clamp_min(torch.sum(mask, dim=reduce_dims), self.min_area)
        loss = torch.sum(loss, dim=reduce_dims) / delimeter
        # NOTE(review): the trailing "| 2,073 | ..." below is dataset-dump row
        # metadata fused onto this line, not Python source.
        return loss | 2,073 | 35.385965 | 99 | py
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/metrics.py | import torch
import torch.nn.functional as F
class TrainMetric(object):
    """Accumulates a scalar metric over the batches of one training epoch.

    Subclasses implement `compute`; `update` folds each batch value into
    the running epoch statistics.
    """

    def __init__(self, pred_outputs, gt_outputs, epsilon=1e-6):
        self.pred_outputs = pred_outputs
        self.gt_outputs = gt_outputs
        self.epsilon = epsilon
        self._last_batch_metric = 0.0
        self._epoch_metric_sum = 0.0
        self._epoch_batch_count = 0

    def compute(self, *args, **kwargs):
        """Compute the metric value for a single batch (subclass hook)."""
        raise NotImplementedError

    def update(self, *args, **kwargs):
        """Compute the metric on the current batch and accumulate it."""
        self._last_batch_metric = self.compute(*args, **kwargs)
        self._epoch_metric_sum += self._last_batch_metric
        self._epoch_batch_count += 1

    def get_epoch_value(self):
        """Mean metric over all batches seen this epoch (0.0 if none)."""
        batches = self._epoch_batch_count
        return self._epoch_metric_sum / batches if batches > 0 else 0.0

    def reset_epoch_stats(self):
        """Forget the accumulated epoch statistics."""
        self._epoch_metric_sum = 0.0
        self._epoch_batch_count = 0

    def log_states(self, sw, tag_prefix, global_step):
        """Write the last batch value to the summary writer `sw`."""
        sw.add_scalar(tag=tag_prefix, value=self._last_batch_metric, global_step=global_step)

    @property
    def name(self):
        return type(self).__name__
class PSNRMetric(TrainMetric):
    """Peak signal-to-noise ratio between prediction and ground truth."""

    def __init__(self, pred_output='instances', gt_output='instances'):
        super(PSNRMetric, self).__init__((pred_output, ), (gt_output, ))

    def compute(self, pred, gt):
        """PSNR in dB, using the ground-truth maximum as the peak value;
        epsilon guards against division by a zero MSE."""
        mse = F.mse_loss(pred, gt)
        peak_sq = gt.max() ** 2
        return (10 * torch.log10(peak_sq / (mse + self.epsilon))).item()
class DenormalizedTrainMetric(TrainMetric):
    """Metric computed on denormalized tensors (tensor * std + mean).

    `mean`/`std` default to scalar 0/1 tensors (identity transform). They
    are lazily moved to the device of the first tensor passed through
    `denormalize`.
    """

    def __init__(self, pred_outputs, gt_outputs, mean=None, std=None):
        super(DenormalizedTrainMetric, self).__init__(pred_outputs, gt_outputs)
        self.mean = torch.zeros(1) if mean is None else mean
        self.std = torch.ones(1) if std is None else std
        self.device = None

    def init_device(self, input_device):
        # Move the normalization constants to the input device exactly once.
        if self.device is None:
            self.device = input_device
            self.mean = self.mean.to(self.device)
            self.std = self.std.to(self.device)

    def denormalize(self, tensor):
        """Undo input normalization so the metric operates in image space."""
        self.init_device(tensor.device)
        return tensor * self.std + self.mean

    # NOTE: the previous `update` override here duplicated
    # TrainMetric.update verbatim; the inherited implementation is used.
class DenormalizedPSNRMetric(DenormalizedTrainMetric):
    """PSNR measured after denormalizing both tensors."""

    def __init__(
        self,
        pred_output='instances', gt_output='instances',
        mean=None, std=None,
    ):
        super(DenormalizedPSNRMetric, self).__init__((pred_output, ), (gt_output, ), mean, std)

    def compute(self, pred, gt):
        # Clamp the prediction to the valid [0, 1] image range, then reuse
        # the plain PSNR computation on the denormalized tensors.
        pred_img = torch.clamp(self.denormalize(pred), 0, 1)
        gt_img = self.denormalize(gt)
        return PSNRMetric.compute(self, pred_img, gt_img)
class DenormalizedMSEMetric(DenormalizedTrainMetric):
    """MSE measured in the 8-bit image range after denormalization."""

    def __init__(
        self,
        pred_output='instances', gt_output='instances',
        mean=None, std=None,
    ):
        super(DenormalizedMSEMetric, self).__init__((pred_output, ), (gt_output, ), mean, std)

    def compute(self, pred, gt):
        # Scale both tensors to [0, 255] before measuring the error.
        return F.mse_loss(self.denormalize(pred) * 255,
                          self.denormalize(gt) * 255).item()
class MSEMetric(TrainMetric):
    """MSE in the 8-bit image range.

    `mean`/`std` are accepted only for signature compatibility with the
    denormalized metrics and are ignored.
    """

    def __init__(
        self,
        pred_output='instances', gt_output='instances',
        mean=None, std=None,
    ):
        super(MSEMetric, self).__init__((pred_output, ), (gt_output, ))

    def compute(self, pred, gt):
        # Scale both tensors to [0, 255] before measuring the error.
        return F.mse_loss(pred * 255, gt * 255).item()
| 3,732 | 31.745614 | 95 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/ops.py | import torch
from torch import nn as nn
class SimpleInputFusion(nn.Module):
    """Fuses an extra single-channel map into an RGB image.

    Concatenates the inputs channel-wise and projects back to `rgb_ch`
    channels through two 1x1 convolutions with a LeakyReLU + norm between.
    """

    def __init__(self, add_ch=1, rgb_ch=3, ch=8, norm_layer=nn.BatchNorm2d):
        super(SimpleInputFusion, self).__init__()
        self.fusion_conv = nn.Sequential(
            nn.Conv2d(in_channels=add_ch + rgb_ch, out_channels=ch, kernel_size=1),
            nn.LeakyReLU(negative_slope=0.2),
            norm_layer(ch),
            nn.Conv2d(in_channels=ch, out_channels=rgb_ch, kernel_size=1),
        )

    def forward(self, image, additional_input):
        # Stack along channels, then run the 1x1 fusion stack.
        stacked = torch.cat((image, additional_input), dim=1)
        return self.fusion_conv(stacked)
class ChannelAttention(nn.Module):
    """Squeeze-and-excitation-style channel attention that combines global
    average- and max-pooled descriptors to produce per-channel weights."""

    def __init__(self, in_channels):
        super(ChannelAttention, self).__init__()
        self.global_pools = nn.ModuleList([
            nn.AdaptiveAvgPool2d(1),
            nn.AdaptiveMaxPool2d(1),
        ])
        intermediate_channels_count = max(in_channels // 16, 8)
        self.attention_transform = nn.Sequential(
            nn.Linear(len(self.global_pools) * in_channels, intermediate_channels_count),
            nn.ReLU(),
            nn.Linear(intermediate_channels_count, in_channels),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Concatenate the pooled descriptors and flatten to (N, 2 * C).
        descriptors = torch.cat([pool(x) for pool in self.global_pools], dim=1)
        descriptors = descriptors.flatten(start_dim=1)
        # Per-channel weights in (0, 1), broadcast over spatial dims.
        weights = self.attention_transform(descriptors)
        return weights[..., None, None] * x
class MaskedChannelAttention(nn.Module):
    """Channel attention conditioned on a foreground mask.

    Combines masked global max pooling (foreground and background pooled
    separately) with global average pooling into a 3*C descriptor, then
    produces sigmoid channel weights.
    """

    def __init__(self, in_channels, *args, **kwargs):
        super(MaskedChannelAttention, self).__init__()
        self.global_max_pool = MaskedGlobalMaxPool2d()
        self.global_avg_pool = FastGlobalAvgPool2d()
        intermediate_channels_count = max(in_channels // 16, 8)
        self.attention_transform = nn.Sequential(
            nn.Linear(3 * in_channels, intermediate_channels_count),
            nn.ReLU(inplace=True),
            nn.Linear(intermediate_channels_count, in_channels),
            nn.Sigmoid(),
        )

    def forward(self, x, mask):
        # Resize the mask to the feature map's spatial size when they differ.
        # BUGFIX: the previous check compared mask.shape[2:] (H, W) against
        # x.shape[:2] (N, C), which forced an interpolation on essentially
        # every call; spatial dims must be compared with spatial dims.
        if mask.shape[2:] != x.shape[2:]:
            mask = nn.functional.interpolate(
                mask, size=x.size()[-2:],
                mode='bilinear', align_corners=True
            )
        pooled_x = torch.cat([
            self.global_max_pool(x, mask),
            self.global_avg_pool(x)
        ], dim=1)
        channel_attention_weights = self.attention_transform(pooled_x)[..., None, None]
        return channel_attention_weights * x
class MaskedGlobalMaxPool2d(nn.Module):
    """Global max pooling over the masked (foreground) and unmasked
    (background) regions separately, concatenated channel-wise to (N, 2*C)."""

    def __init__(self):
        super().__init__()
        self.global_max_pool = FastGlobalMaxPool2d()

    def forward(self, x, mask):
        foreground = self.global_max_pool(x * mask)
        background = self.global_max_pool(x * (1.0 - mask))
        return torch.cat((foreground, background), dim=1)
class FastGlobalAvgPool2d(nn.Module):
    """Global average pooling to shape (N, C) via a flatten-then-mean,
    avoiding the (N, C, 1, 1) output of AdaptiveAvgPool2d."""

    def __init__(self):
        super(FastGlobalAvgPool2d, self).__init__()

    def forward(self, x):
        batch, channels = x.size()[0], x.size()[1]
        return x.view((batch, channels, -1)).mean(dim=2)
class FastGlobalMaxPool2d(nn.Module):
    """Global max pooling to shape (N, C) via a flatten-then-max."""

    def __init__(self):
        super(FastGlobalMaxPool2d, self).__init__()

    def forward(self, x):
        batch, channels = x.size()[0], x.size()[1]
        # max over the flattened spatial dim returns (values, indices).
        return x.view((batch, channels, -1)).max(dim=2)[0]
class ScaleLayer(nn.Module):
    """Multiplies the input by a single learnable non-negative scale.

    The parameter is stored divided by `lr_mult`, which effectively
    multiplies its learning rate by `lr_mult`.
    """

    def __init__(self, init_value=1.0, lr_mult=1):
        super().__init__()
        self.lr_mult = lr_mult
        self.scale = nn.Parameter(
            torch.full((1,), init_value / lr_mult, dtype=torch.float32)
        )

    def forward(self, x):
        # abs() keeps the effective scale non-negative during training.
        effective_scale = torch.abs(self.scale * self.lr_mult)
        return x * effective_scale
class FeaturesConnector(nn.Module):
    """Merges backbone features into a decoder tensor.

    Modes:
      'cat'  -- plain channel concatenation;
      'catc' -- concatenation followed by a 1x1 conv back to out_channels;
      'sum'  -- 1x1 conv on the features, added to x;
      ''     -- features ignored (chosen when feature_channels == 0).
    """

    def __init__(self, mode, in_channels, feature_channels, out_channels):
        super(FeaturesConnector, self).__init__()
        # Degrade to a pass-through when there are no features to connect.
        self.mode = mode if feature_channels else ''
        if self.mode == 'catc':
            self.reduce_conv = nn.Conv2d(in_channels + feature_channels, out_channels, kernel_size=1)
        elif self.mode == 'sum':
            self.reduce_conv = nn.Conv2d(feature_channels, out_channels, kernel_size=1)
        self.output_channels = in_channels + feature_channels if self.mode == 'cat' else out_channels

    def forward(self, x, features):
        if self.mode == 'cat':
            merged = torch.cat((x, features), 1)
        elif self.mode == 'catc':
            merged = self.reduce_conv(torch.cat((x, features), 1))
        elif self.mode == 'sum':
            merged = self.reduce_conv(features) + x
        else:
            merged = x
        return merged

    def extra_repr(self):
        return self.mode
| 4,695 | 32.784173 | 101 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/initializer.py | import torch
import torch.nn as nn
import numpy as np
class Initializer(object):
    """Callable weight initializer, applied via `module.apply(init)`.

    Normalization layers receive gamma/beta initialization; any other
    module exposing `weight`/`bias` receives the generic weight/bias
    initialization. With `local_init`, each module is tagged so it is
    initialized at most once.
    """

    def __init__(self, local_init=True, gamma=None):
        self.local_init = local_init
        self.gamma = gamma

    def __call__(self, m):
        if getattr(m, '__initialized', False):
            return
        norm_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
                      nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
                      nn.GroupNorm, nn.SyncBatchNorm)
        # The name check also catches custom norm classes (e.g. synced BN)
        # that do not subclass the torch norm types.
        if isinstance(m, norm_types) or 'BatchNorm' in m.__class__.__name__:
            if m.weight is not None:
                self._init_gamma(m.weight.data)
            if m.bias is not None:
                self._init_beta(m.bias.data)
        else:
            if getattr(m, 'weight', None) is not None:
                self._init_weight(m.weight.data)
            if getattr(m, 'bias', None) is not None:
                self._init_bias(m.bias.data)
        if self.local_init:
            object.__setattr__(m, '__initialized', True)

    def _init_weight(self, data):
        nn.init.uniform_(data, -0.07, 0.07)

    def _init_bias(self, data):
        nn.init.constant_(data, 0)

    def _init_gamma(self, data):
        if self.gamma is None:
            nn.init.constant_(data, 1.0)
        else:
            nn.init.normal_(data, 1.0, self.gamma)

    def _init_beta(self, data):
        nn.init.constant_(data, 0)
class Bilinear(Initializer):
    """Initializes (transposed) convolution weights with a bilinear
    upsampling kernel for the given integer `scale`."""

    def __init__(self, scale, groups, in_channels, **kwargs):
        super().__init__(**kwargs)
        self.scale = scale
        self.groups = groups
        self.in_channels = in_channels

    def _init_weight(self, data):
        """Reset the weight and bias."""
        kernel = self.get_bilinear_kernel(self.scale)
        weight = torch.zeros_like(data)
        for i in range(self.in_channels):
            # With grouped convolution every group has a single input slot.
            j = i if self.groups == 1 else 0
            weight[i, j] = kernel
        data[:] = weight

    @staticmethod
    def get_bilinear_kernel(scale):
        """Generate a bilinear upsampling kernel."""
        kernel_size = 2 * scale - scale % 2
        scale = (kernel_size + 1) // 2
        center = scale - 0.5 * (1 + kernel_size % 2)
        axes = np.ogrid[:kernel_size, :kernel_size]
        kernel = (1 - np.abs(axes[0] - center) / scale) * (1 - np.abs(axes[1] - center) / scale)
        return torch.tensor(kernel, dtype=torch.float32)
class XavierGluon(Initializer):
    """Xavier/Glorot weight initialization matching MXNet Gluon's variants
    ('uniform'/'gaussian' sampling, 'avg'/'in'/'out' fan factor)."""

    def __init__(self, rnd_type='uniform', factor_type='avg', magnitude=3, **kwargs):
        super().__init__(**kwargs)
        self.rnd_type = rnd_type
        self.factor_type = factor_type
        self.magnitude = float(magnitude)

    def _init_weight(self, arr):
        fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(arr)
        if self.factor_type == 'avg':
            factor = 0.5 * (fan_in + fan_out)
        elif self.factor_type == 'in':
            factor = fan_in
        elif self.factor_type == 'out':
            factor = fan_out
        else:
            raise ValueError('Incorrect factor type')
        scale = np.sqrt(self.magnitude / factor)
        if self.rnd_type == 'uniform':
            nn.init.uniform_(arr, -scale, scale)
        elif self.rnd_type == 'gaussian':
            nn.init.normal_(arr, 0, scale)
        else:
            raise ValueError('Unknown random type')
| 3,408 | 31.160377 | 98 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/syncbn/modules/nn/syncbn.py | """
/*****************************************************************************/
BatchNorm2dSync with multi-gpu
/*****************************************************************************/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
# python 3
from queue import Queue
except ImportError:
# python 2
from Queue import Queue
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from iharm.model.syncbn.modules.functional import batchnorm2d_sync
class _BatchNorm(nn.Module):
"""
Customized BatchNorm from nn.BatchNorm
>> added freeze attribute to enable bn freeze.
"""
def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
track_running_stats=True):
super(_BatchNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
self.freezed = False
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
else:
self.register_parameter('running_mean', None)
self.register_parameter('running_var', None)
self.reset_parameters()
def reset_parameters(self):
if self.track_running_stats:
self.running_mean.zero_()
self.running_var.fill_(1)
if self.affine:
self.weight.data.uniform_()
self.bias.data.zero_()
def _check_input_dim(self, input):
return NotImplemented
def forward(self, input):
self._check_input_dim(input)
compute_stats = not self.freezed and \
self.training and self.track_running_stats
ret = F.batch_norm(input, self.running_mean, self.running_var,
self.weight, self.bias, compute_stats,
self.momentum, self.eps)
return ret
def extra_repr(self):
return '{num_features}, eps={eps}, momentum={momentum}, '\
'affine={affine}, ' \
'track_running_stats={track_running_stats}'.format(
**self.__dict__)
class BatchNorm2dNoSync(_BatchNorm):
    """
    Equivalent to nn.BatchNorm2d
    """

    def _check_input_dim(self, input):
        # 2D batch norm requires NCHW (4D) input.
        ndim = input.dim()
        if ndim != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(ndim))
class BatchNorm2dSync(BatchNorm2dNoSync):
    """
    BatchNorm2d with automatic multi-GPU Sync
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        super(BatchNorm2dSync, self).__init__(
            num_features, eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats)
        self.sync_enabled = True
        self.devices = list(range(torch.cuda.device_count()))
        if len(self.devices) > 1:
            # Initialize queues
            # Device 0 acts as the master; the remaining devices are workers
            # exchanging per-batch statistics with it through these queues.
            self.worker_ids = self.devices[1:]
            self.master_queue = Queue(len(self.worker_ids))
            self.worker_queues = [Queue(1) for _ in self.worker_ids]

    def forward(self, x):
        # Synchronized path is only taken when batch statistics are being
        # computed and more than one GPU is visible.
        compute_stats = not self.freezed and \
            self.training and self.track_running_stats
        if self.sync_enabled and compute_stats and len(self.devices) > 1:
            if x.get_device() == self.devices[0]:
                # Master mode
                extra = {
                    "is_master": True,
                    "master_queue": self.master_queue,
                    "worker_queues": self.worker_queues,
                    "worker_ids": self.worker_ids
                }
            else:
                # Worker mode
                extra = {
                    "is_master": False,
                    "master_queue": self.master_queue,
                    "worker_queue": self.worker_queues[
                        self.worker_ids.index(x.get_device())]
                }
            return batchnorm2d_sync(x, self.weight, self.bias,
                                    self.running_mean, self.running_var,
                                    extra, compute_stats, self.momentum,
                                    self.eps)
        # Single-GPU / eval / frozen fallback: plain (unsynchronized) BN.
        return super(BatchNorm2dSync, self).forward(x)

    def __repr__(self):
        """repr"""
        rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \
            'affine={affine}, ' \
            'track_running_stats={track_running_stats},' \
            'devices={devices})'
        return rep.format(name=self.__class__.__name__, **self.__dict__)
#BatchNorm2d = BatchNorm2dNoSync
# Module-level alias: the synchronized implementation is the default export.
BatchNorm2d = BatchNorm2dSync
| 5,187 | 33.818792 | 79 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/syncbn/modules/functional/syncbn.py | """
/*****************************************************************************/
BatchNorm2dSync with multi-gpu
code referenced from : https://github.com/mapillary/inplace_abn
/*****************************************************************************/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.cuda.comm as comm
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from ._csrc import _backend
def _count_samples(x):
count = 1
for i, s in enumerate(x.size()):
if i != 1:
count *= s
return count
class BatchNorm2dSyncFunc(Function):
    """Autograd function implementing cross-GPU synchronized batch norm.

    One device acts as the master: it gathers partial sums from worker
    devices through queues, reduces them into global batch statistics,
    and broadcasts the result back. The heavy lifting is done by the
    compiled `_backend` extension.
    """

    @staticmethod
    def forward(ctx, x, weight, bias, running_mean, running_var,
                extra, compute_stats=True, momentum=0.1, eps=1e-05):
        def _parse_extra(ctx, extra):
            # Stash queue handles on the autograd context so backward can
            # perform the same master/worker exchange.
            ctx.is_master = extra["is_master"]
            if ctx.is_master:
                ctx.master_queue = extra["master_queue"]
                ctx.worker_queues = extra["worker_queues"]
                ctx.worker_ids = extra["worker_ids"]
            else:
                ctx.master_queue = extra["master_queue"]
                ctx.worker_queue = extra["worker_queue"]
        # Save context
        if extra is not None:
            _parse_extra(ctx, extra)
        ctx.compute_stats = compute_stats
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.affine = weight is not None and bias is not None
        if ctx.compute_stats:
            # Total sample count across all participating devices.
            N = _count_samples(x) * (ctx.master_queue.maxsize + 1)
            assert N > 1
            # 1. compute sum(x) and sum(x^2)
            xsum, xsqsum = _backend.syncbn_sum_sqsum(x.detach())
            if ctx.is_master:
                xsums, xsqsums = [xsum], [xsqsum]
                # master : gatther all sum(x) and sum(x^2) from slaves
                for _ in range(ctx.master_queue.maxsize):
                    xsum_w, xsqsum_w = ctx.master_queue.get()
                    ctx.master_queue.task_done()
                    xsums.append(xsum_w)
                    xsqsums.append(xsqsum_w)
                xsum = comm.reduce_add(xsums)
                xsqsum = comm.reduce_add(xsqsums)
                mean = xsum / N
                sumvar = xsqsum - xsum * mean
                # Biased variance for normalization, unbiased for running stats.
                var = sumvar / N
                uvar = sumvar / (N - 1)
                # master : broadcast global mean, variance to all slaves
                tensors = comm.broadcast_coalesced(
                    (mean, uvar, var), [mean.get_device()] + ctx.worker_ids)
                for ts, queue in zip(tensors[1:], ctx.worker_queues):
                    queue.put(ts)
            else:
                # slave : send sum(x) and sum(x^2) to master
                ctx.master_queue.put((xsum, xsqsum))
                # slave : get global mean and variance
                mean, uvar, var = ctx.worker_queue.get()
                ctx.worker_queue.task_done()
            # Update running stats
            running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)
            running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * uvar)
            ctx.N = N
            ctx.save_for_backward(x, weight, bias, mean, var)
        else:
            # Inference / frozen path: normalize with the running statistics.
            mean, var = running_mean, running_var
        # do batch norm forward
        z = _backend.syncbn_forward(x, weight, bias, mean, var,
                                    ctx.affine, ctx.eps)
        return z

    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        x, weight, bias, mean, var = ctx.saved_tensors
        dz = dz.contiguous()
        # 1. compute \sum(\frac{dJ}{dy_i}) and \sum(\frac{dJ}{dy_i}*\hat{x_i})
        sum_dz, sum_dz_xhat = _backend.syncbn_backward_xhat(
            dz, x, mean, var, ctx.eps)
        if ctx.is_master:
            sum_dzs, sum_dz_xhats = [sum_dz], [sum_dz_xhat]
            # master : gatther from slaves
            for _ in range(ctx.master_queue.maxsize):
                sum_dz_w, sum_dz_xhat_w = ctx.master_queue.get()
                ctx.master_queue.task_done()
                sum_dzs.append(sum_dz_w)
                sum_dz_xhats.append(sum_dz_xhat_w)
            # master : compute global stats
            sum_dz = comm.reduce_add(sum_dzs)
            sum_dz_xhat = comm.reduce_add(sum_dz_xhats)
            sum_dz /= ctx.N
            sum_dz_xhat /= ctx.N
            # master : broadcast global stats
            tensors = comm.broadcast_coalesced(
                (sum_dz, sum_dz_xhat), [mean.get_device()] + ctx.worker_ids)
            for ts, queue in zip(tensors[1:], ctx.worker_queues):
                queue.put(ts)
        else:
            # slave : send to master
            ctx.master_queue.put((sum_dz, sum_dz_xhat))
            # slave : get global stats
            sum_dz, sum_dz_xhat = ctx.worker_queue.get()
            ctx.worker_queue.task_done()
        # do batch norm backward
        dx, dweight, dbias = _backend.syncbn_backward(
            dz, x, weight, bias, mean, var, sum_dz, sum_dz_xhat,
            ctx.affine, ctx.eps)
        # Gradients only for (x, weight, bias); the remaining forward
        # arguments (running stats, extra, flags) are non-differentiable.
        return dx, dweight, dbias, \
            None, None, None, None, None, None
# Functional entry point used by the BatchNorm2dSync module.
batchnorm2d_sync = BatchNorm2dSyncFunc.apply
__all__ = ["batchnorm2d_sync"]
| 5,291 | 37.347826 | 79 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/syncbn/modules/functional/_csrc.py | """
/*****************************************************************************/
Extension module loader
code referenced from : https://github.com/facebookresearch/maskrcnn-benchmark
/*****************************************************************************/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import torch
try:
from torch.utils.cpp_extension import load
from torch.utils.cpp_extension import CUDA_HOME
except ImportError:
raise ImportError(
"The cpp layer extensions requires PyTorch 0.4 or higher")
def _load_C_extensions():
    """JIT-compile and load the C/CUDA extension sources under ./csrc.

    CUDA sources and the corresponding compile flags are only added when a
    CUDA toolkit is actually available at runtime.
    """
    csrc_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "csrc")
    ext_sources = (
        glob.glob(os.path.join(csrc_dir, "*.cpp"))
        + glob.glob(os.path.join(csrc_dir, "cpu", "*.cpp"))
    )
    cflags, cuda_cflags = [], []
    if torch.cuda.is_available() and CUDA_HOME is not None:
        ext_sources += glob.glob(os.path.join(csrc_dir, "cuda", "*.cu"))
        cflags = ["-O3", "-DWITH_CUDA"]
        cuda_cflags = ["--expt-extended-lambda"]
    # glob already returns paths rooted at csrc_dir, so this re-join is a
    # no-op on absolute paths; kept for parity with the original behaviour.
    ext_sources = [os.path.join(csrc_dir, src) for src in ext_sources]
    return load(
        name="ext_lib",
        sources=ext_sources,
        extra_cflags=cflags,
        extra_include_paths=[csrc_dir],
        extra_cuda_cflags=cuda_cflags,
    )
# Compile (if needed) and load the native extension once, at import time.
_backend = _load_C_extensions()
| 1,586 | 27.854545 | 79 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/backboned/hrnet.py | import torch.nn as nn
from iharm.model.modeling.hrnet_ocr import HighResolutionNet
from iharm.model.backboned.ih_model import IHModelWithBackbone
from iharm.model.modifiers import LRMult
from iharm.model.modeling.basic_blocks import MaxPoolDownSize
class HRNetIHModel(IHModelWithBackbone):
    """Image harmonization model backed by a pre-trained HRNet feature extractor."""
    def __init__(
        self,
        base_config,
        downsize_hrnet_input=False, mask_fusion='sum',
        lr_mult=0.1, cat_hrnet_outputs=True, pyramid_channels=-1,
        ocr=64, width=18, small=True,
        mode='cat',
        **base_kwargs
    ):
        """Build the base model described by ``base_config`` and attach an HRNet backbone.

        Parameters
        ----------
        base_config : dict
            {'model': class, 'params': dict}; entries from ``base_kwargs``
            override ``params``.
        downsize_hrnet_input : bool
            Feed the backbone a half-sized image and attach its features one
            encoder level deeper.
        mask_fusion : str
            'sum' / 'rgb' / other — see IHModelWithBackbone.
        lr_mult : float
            Learning-rate multiplier for the pre-trained HRNet weights.
        cat_hrnet_outputs : bool
            Concatenate the four HRNet resolutions into one tensor.
        pyramid_channels : int
            If > 0 (and concatenating), build a feature pyramid with this many
            channels per level.
        ocr : int
            OCR head width; <= 0 disables OCR.
        width, small :
            HRNet block width and depth variant.
        mode : str
            'sum' / 'cat' / 'catc' — how backbone features merge into the base
            encoder.
        base_kwargs : dict
            Extra keyword arguments forwarded to the base model.
        """
        params = base_config['params']
        params.update(base_kwargs)
        model_depth = params['depth']
        # One pyramid level per decoder stage the backbone can feed, capped at 4.
        levels = model_depth - 3 if downsize_hrnet_input else model_depth - 2
        hrnet_backbone = HRNetBB(
            cat_outputs=cat_hrnet_outputs,
            pyramid_channels=pyramid_channels,
            pyramid_depth=min(levels, 4),
            width=width, ocr=ocr, small=small,
            lr_mult=lr_mult,
        )
        params['backbone_from'] = 3 if downsize_hrnet_input else 2
        params['backbone_channels'] = hrnet_backbone.output_channels
        params['backbone_mode'] = mode
        base_model = base_config['model'](**params)
        super(HRNetIHModel, self).__init__(
            base_model, hrnet_backbone, downsize_hrnet_input, mask_fusion)
class HRNetBB(nn.Module):
    """HRNet-based feature extractor with optional OCR head and feature pyramid."""
    def __init__(
        self,
        cat_outputs=True,
        pyramid_channels=256, pyramid_depth=4,
        width=18, ocr=64, small=True,
        lr_mult=0.1,
    ):
        super(HRNetBB, self).__init__()
        self.cat_outputs = cat_outputs
        # OCR and pyramid are only meaningful on the concatenated output.
        self.ocr_on = cat_outputs and ocr > 0
        self.pyramid_on = cat_outputs and pyramid_channels > 0
        self.hrnet = HighResolutionNet(width, 2, ocr_width=ocr, small=small)
        self.hrnet.apply(LRMult(lr_mult))
        if self.ocr_on:
            # OCR heads are trained from scratch — keep their full learning rate.
            for head in (self.hrnet.ocr_distri_head,
                         self.hrnet.ocr_gather_head,
                         self.hrnet.conv3x3_ocr):
                head.apply(LRMult(1.0))
        # HRNet doubles the channel count at each of its four resolutions.
        stage_channels = [width * (2 ** level) for level in range(4)]
        if self.pyramid_on:
            self.output_channels = 4 * [pyramid_channels]
        elif self.ocr_on:
            self.output_channels = [2 * ocr]
        elif cat_outputs:
            self.output_channels = [sum(stage_channels)]
        else:
            self.output_channels = stage_channels
        if self.pyramid_on:
            pyramid_in = 2 * ocr if self.ocr_on else sum(stage_channels)
            self.downsize = MaxPoolDownSize(pyramid_in, pyramid_channels,
                                            pyramid_channels, pyramid_depth)
    def forward(self, image, mask, mask_features):
        """Return a list of backbone feature maps for the given inputs."""
        if not self.cat_outputs:
            # Raw four-resolution feature list, no concatenation.
            return self.hrnet.compute_hrnet_feats(image, mask_features, return_list=True)
        feats = list(self.hrnet(image, mask, mask_features))
        return self.downsize(feats[0]) if self.pyramid_on else feats
    def load_pretrained_weights(self, pretrained_path):
        """Load pre-trained HRNet weights from ``pretrained_path``."""
        self.hrnet.load_pretrained_weights(pretrained_path)
| 5,787 | 43.523077 | 117 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/backboned/ih_model.py | import torch
import torch.nn as nn
from iharm.model.ops import SimpleInputFusion, ScaleLayer
class IHModelWithBackbone(nn.Module):
    def __init__(
        self,
        model, backbone,
        downsize_backbone_input=False,
        mask_fusion='sum',
        backbone_conv1_channels=64,
    ):
        """
        Creates image harmonization model supported by the features extracted from the pre-trained backbone.

        Parameters
        ----------
        model : nn.Module
            Image harmonization model takes image and mask as an input and handles features from the backbone network.
        backbone : nn.Module
            Backbone model accepts RGB image and returns a list of features.
        downsize_backbone_input : bool
            If the input image should be half-sized for the backbone.
        mask_fusion : str
            How to fuse the binary mask with the backbone input:
            'sum': apply convolution to the mask and sum it with the output of the first convolution in the backbone
            'rgb': concatenate the mask to the input image and translate it back to 3 channels with convolution
            otherwise: do not fuse mask with the backbone input
        backbone_conv1_channels : int
            If mask_fusion is 'sum', define the number of channels for the convolution applied to the mask.
        """
        super(IHModelWithBackbone, self).__init__()
        self.downsize_backbone_input = downsize_backbone_input
        self.mask_fusion = mask_fusion
        self.backbone = backbone
        self.model = model
        if mask_fusion == 'rgb':
            self.fusion = SimpleInputFusion()
        elif mask_fusion == 'sum':
            # stride=2 so the mask features match the resolution after the
            # backbone's first (downsampling) convolution.
            self.mask_conv = nn.Sequential(
                nn.Conv2d(1, backbone_conv1_channels, kernel_size=3, stride=2, padding=1, bias=True),
                ScaleLayer(init_value=0.1, lr_mult=1)
            )

    def forward(self, image, mask, previous_feat=None):
        """
        Forward the backbone model and then the base model, supported by the backbone feature maps.
        Return model predictions.

        Parameters
        ----------
        image : torch.Tensor
            Input RGB image.
        mask : torch.Tensor
            Binary mask of the foreground region.
        previous_feat : optional
            Forwarded untouched to the base model (e.g. features of a previous
            video frame).

        Returns
        -------
        torch.Tensor
            Harmonized RGB image (whatever ``self.model`` returns).
        """
        backbone_image = image
        # Two-channel mask: foreground probability and its complement.
        backbone_mask = torch.cat((mask, 1.0 - mask), dim=1)
        if self.downsize_backbone_input:
            backbone_image = nn.functional.interpolate(
                backbone_image, scale_factor=0.5,
                mode='bilinear', align_corners=True
            )
            backbone_mask = nn.functional.interpolate(
                backbone_mask, backbone_image.size()[2:],
                mode='bilinear', align_corners=True
            )
        backbone_image = (
            self.fusion(backbone_image, backbone_mask[:, :1])
            if self.mask_fusion == 'rgb' else
            backbone_image
        )
        backbone_mask_features = self.mask_conv(backbone_mask[:, :1]) if self.mask_fusion == 'sum' else None
        backbone_features = self.backbone(backbone_image, backbone_mask, backbone_mask_features)
        # FIX: removed leftover debug loop that printed every backbone feature
        # shape on each forward pass (and its commented-out sibling print).
        output = self.model(image, mask, backbone_features, previous_feat)
        return output
| 3,463 | 37.488889 | 118 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/backboned/deeplab.py | from torch import nn as nn
from iharm.model.modeling.deeplab_v3 import DeepLabV3Plus
from iharm.model.backboned.ih_model import IHModelWithBackbone
from iharm.model.modifiers import LRMult
from iharm.model.modeling.basic_blocks import MaxPoolDownSize
class DeepLabIHModel(IHModelWithBackbone):
    """Image harmonization model backed by a pre-trained DeepLab feature extractor."""
    def __init__(
        self,
        base_config,
        mask_fusion='sum',
        deeplab_backbone='resnet34',
        lr_mult=0.1,
        pyramid_channels=-1, deeplab_ch=256,
        mode='cat',
        **base_kwargs
    ):
        """Build the base model described by ``base_config`` and attach a DeepLab backbone.

        Parameters
        ----------
        base_config : dict
            {'model': class, 'params': dict}; entries from ``base_kwargs``
            override ``params``.
        mask_fusion : str
            'sum' / 'rgb' / other — see IHModelWithBackbone.
        deeplab_backbone : str
            Name of the ResNet used inside DeepLab.
        lr_mult : float
            Learning-rate multiplier for the pre-trained backbone weights.
        pyramid_channels : int
            If > 0, downsize the DeepLab output into a 4-level feature pyramid
            with this many channels per level.
        deeplab_ch : int
            Channel width of the DeepLab output.
        mode : str
            'sum' / 'cat' / 'catc' — how backbone features merge into the base
            encoder.
        base_kwargs : dict
            Extra keyword arguments forwarded to the base model.
        """
        params = base_config['params']
        params.update(base_kwargs)
        feature_extractor = DeepLabBB(pyramid_channels, deeplab_ch,
                                      deeplab_backbone, lr_mult)
        # Deep base models (depth > 7) get a half-sized backbone input and
        # attach the backbone features one encoder level deeper.
        halve_input = params['depth'] > 7
        params['backbone_from'] = 3 if halve_input else 2
        params['backbone_channels'] = feature_extractor.output_channels
        params['backbone_mode'] = mode
        base_model = base_config['model'](**params)
        super(DeepLabIHModel, self).__init__(
            base_model, feature_extractor, halve_input, mask_fusion)
class DeepLabBB(nn.Module):
    """DeepLabV3+ feature extractor, optionally followed by a max-pool feature pyramid."""
    def __init__(
        self,
        pyramid_channels=256,
        deeplab_ch=256,
        backbone='resnet34',
        backbone_lr_mult=0.1,
    ):
        super(DeepLabBB, self).__init__()
        self.pyramid_on = pyramid_channels > 0
        # Channel counts of the feature maps this module returns.
        self.output_channels = (
            [pyramid_channels] * 4 if self.pyramid_on else [deeplab_ch]
        )
        self.deeplab = DeepLabV3Plus(backbone=backbone,
                                     ch=deeplab_ch,
                                     project_dropout=0.2,
                                     norm_layer=nn.BatchNorm2d,
                                     backbone_norm_layer=nn.BatchNorm2d)
        # Pre-trained backbone weights learn at a reduced rate.
        self.deeplab.backbone.apply(LRMult(backbone_lr_mult))
        if self.pyramid_on:
            self.downsize = MaxPoolDownSize(deeplab_ch, pyramid_channels,
                                            pyramid_channels, 4)
    def forward(self, image, mask, mask_features):
        """Run DeepLab (``mask`` is unused here) and return a list of feature maps."""
        feats = list(self.deeplab(image, mask_features))
        return self.downsize(feats[0]) if self.pyramid_on else feats
    def load_pretrained_weights(self):
        """Load pre-trained weights into the wrapped DeepLab network."""
        self.deeplab.load_pretrained_weights()
| 4,474 | 41.619048 | 117 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/ssam_video_lut_dbp.py | import torch
from functools import partial
from torch import nn as nn
import torch.nn.functional as F
import numpy as np
from iharm.model.base.rain.util.config import cfg
from iharm.model.base.rain.models.networks import RainNet
from iharm.model.base.rain.models.normalize import RAIN
from iharm.model.base.rain.util import util as rainutil
from iharm.model.modeling.basic_blocks import ConvBlock, GaussianSmoothing
from iharm.model.modeling.unet import UNetEncoder, UNetDecoder
from iharm.model.ops import ChannelAttention
from iharm.model.modeling.dbp import NewRes, SimpleRefine
import time
from iharm.utils.misc import load_weights
from iharm.model.base import SSAMImageHarmonization
from iharm.model.modeling.lut import TrilinearInterpolation, TridistributeGeneraotr
class SSAMvideoLut(nn.Module):
    """Video harmonization model: a frozen per-frame backbone (iSSAM or
    RainNet) harmonizes each frame; a 3D color LUT built from the backbone's
    results on previous frames is applied to the current frame, and a
    trainable refinement network (DBP) fuses backbone and LUT outputs.
    """
    def __init__(
            self,
            depth, device = None, backbone_path = "" , with_lutoutput = False, use_feature=True, need_normalize = True, need_denormalize = True,
            backbone_type = 'issam'
    ):
        super(SSAMvideoLut, self).__init__()
        self.backbone_type = backbone_type
        # Normalization statistics depend on the backbone: iSSAM uses ImageNet
        # mean/std, RainNet uses 0.5/0.5.
        if self.backbone_type == 'issam':
            self.mean = torch.tensor([.485, .456, .406], dtype=torch.float32).view(1, 3, 1, 1)
            self.std = torch.tensor([.229, .224, .225], dtype=torch.float32).view(1, 3, 1, 1)
        elif self.backbone_type == 'rain':
            self.mean = torch.tensor([.5, .5, .5], dtype=torch.float32).view(1, 3, 1, 1)
            self.std = torch.tensor([.5, .5, .5], dtype=torch.float32).view(1, 3, 1, 1)
        self.depth = depth
        self.use_feature = use_feature
        self.need_normalize = need_normalize
        self.need_denormalize = need_denormalize
        self.backbone_checkpoint = backbone_path
        # issam
        # When True, forward() consumes a precomputed LUT output/map instead of
        # rebuilding the LUT from previous frames.
        self.with_lutoutput = with_lutoutput
        self.device = device
        # Frozen harmonization backbone, restored from `backbone_path`.
        if self.backbone_type == 'issam':
            self.backbone = SSAMImageHarmonization(
                depth=self.depth, ch=32, image_fusion=True, attention_mid_k=0.5,
                attend_from=2, batchnorm_from=2)
            load_weights(self.backbone, self.backbone_checkpoint)
        elif self.backbone_type == 'rain':
            self.backbone = RainNet(input_nc=cfg.input_nc,
                                    output_nc=cfg.output_nc,
                                    ngf=cfg.ngf,
                                    norm_layer=RAIN,
                                    use_dropout=not cfg.no_dropout)
            state_dict = torch.load(self.backbone_checkpoint)
            rainutil.copy_state_dict(self.backbone.state_dict(), state_dict)
        print("load backbone from {}".format(self.backbone_checkpoint))
        #
        self.lut_dim = 33
        self.lut_generator = TridistributeGeneraotr()
        self.trilinear = TrilinearInterpolation()
        # dbp refinement
        # Refinement input width matches the backbone's feature width when
        # features are used (32 for iSSAM, 128 for RainNet).
        if self.backbone_type == 'issam':
            in_channel = 32 if use_feature else 0
            self.refine_network = SimpleRefine(feature_channels=in_channel, inner_channel=64)
        elif self.backbone_type == 'rain':
            in_channel = 128 if use_feature else 0
            self.refine_network = SimpleRefine(feature_channels=in_channel, inner_channel=64)
    def load_backbone(self):
        """Re-load the frozen backbone weights from the stored checkpoint path."""
        if self.backbone_type == 'issam':
            load_weights(self.backbone, self.backbone_checkpoint)
        elif self.backbone_type == 'rain':
            state_dict = torch.load(self.backbone_checkpoint)
            rainutil.copy_state_dict(self.backbone.state_dict(), state_dict)
        print("load backbone")
        #exit()
    def init_device(self, input_device):
        # Lazily bind the mean/std buffers to the device of the first tensor seen.
        if self.device is None:
            self.device = input_device
            self.mean = self.mean.to(self.device)
            self.std = self.std.to(self.device)
    def normalize(self, tensor):
        """Map an RGB tensor into the backbone's normalized space."""
        self.init_device(tensor.device)
        # return self.norm(tensor)
        return (tensor - self.mean) / self.std
    def denormalize(self, tensor):
        """Inverse of normalize()."""
        self.init_device(tensor.device)
        return tensor * self.std + self.mean
    def mask_denormalize(self, tensor, mask, original):
        """Denormalize `tensor` inside `mask`; keep `original` elsewhere."""
        self.init_device(tensor.device)
        tmp_res = tensor * self.std + self.mean
        return tmp_res*mask + original * (1-mask)
    def train(self, mode=True):
        # The backbone stays frozen in eval mode; only the refinement trains.
        # NOTE(review): `mode` is ignored — train(False) still puts the
        # refinement network in train mode.
        if self.backbone_type == 'issam':
            self.backbone.eval()
        elif self.backbone_type == 'rain':
            self.backbone.eval()
        self.refine_network.train()
    def eval(self):
        if self.backbone_type == 'issam':
            self.backbone.eval()
        elif self.backbone_type == 'rain':
            self.backbone.eval()
        self.refine_network.eval()
    def forward(self, image, mask, backbone_features=None, previous={}, direct_lutoutput = None, direct_lut_map = None, names=[]):
        """Harmonize `image` inside `mask` using backbone + LUT + refinement.

        `previous` holds stacked previous frames/masks — assumed shape
        (batch, seq, C, H, W) from the indexing below; TODO confirm against
        the dataset loader.  When `self.with_lutoutput` is True,
        `direct_lutoutput`/`direct_lut_map` are used instead of rebuilding
        the LUT.  Returns a dict with 'images' (refined result),
        'backbone_out' and 'lut_output'.
        NOTE(review): mutable default arguments ({} and []) are shared across
        calls; they are only read here, but None defaults would be safer.
        """
        dim = 33
        #print(self.need_normalize, self.need_denormalize)
        #exit()
        if not self.with_lutoutput:
            previous_images = previous['images']
            previous_masks = previous['masks']
        if self.need_normalize:
            normaled_images = self.normalize(image)
        else:
            normaled_images = image
        # Frozen backbone pass on the current frame (no gradients).
        with torch.no_grad():
            if self.backbone_type == 'issam':
                cur_backbone_result = self.backbone(normaled_images, mask)
                cur_backbone_output, cur_backbone_feature = cur_backbone_result['images'], cur_backbone_result['features']
            elif self.backbone_type == 'rain':
                cur_backbone_feature, cur_backbone_output = self.backbone.processImage(normaled_images, mask)
        if self.need_denormalize:
            denormaled_cur_backbone_output = self.mask_denormalize(cur_backbone_output, mask, image)
        else:
            denormaled_cur_backbone_output = cur_backbone_output
        if not self.with_lutoutput:
            # Harmonize every previous frame with the frozen backbone; these
            # (input, output) pairs will populate the LUT.
            previous_num = previous_images.shape[1]
            pre_backbone_outputs = []
            for index in range(previous_num):
                if self.need_normalize:
                    normaled_previous_images = self.normalize(previous_images[:, index, :, :, :])
                else:
                    normaled_previous_images = previous_images[:, index, :, :, :]
                with torch.no_grad():
                    if self.backbone_type == 'issam':
                        pre_backbone_result = self.backbone(normaled_previous_images, previous_masks[:, index, :, :, :])
                        normaled_pre_backbone_output = pre_backbone_result['images']
                    elif self.backbone_type == 'rain':
                        _, normaled_pre_backbone_output = self.backbone.processImage(normaled_previous_images,
                                                                                     previous_masks[:, index, :, :, :])
                pre_backbone_outputs.append(
                    self.mask_denormalize(normaled_pre_backbone_output, previous_masks[:, index, :, :, :],
                                          previous_images[:, index, :, :, :]))
            pre_backbone_outputs = torch.stack(pre_backbone_outputs, dim=1)
            # luts = torch.zeros((3,33,33,33)).to(normaled_pre_issam_output.device)
            # lut_counts = torch.zeros((3,33,33,33)).to(normaled_pre_issam_output.device)
            # pre issam outputs seq*batch*3*256*256, previous image batch * seq*3*256*256
            batch = image.shape[0]
            w = previous_masks.shape[-2]
            h = previous_masks.shape[-1]
            # Tile the sequence dimension along height so one LUT is built per
            # batch sample from all previous frames at once.
            tmp_previous_masks = torch.reshape(previous_masks.permute((0, 2, 1, 3, 4)),
                                               (batch, 1, w * previous_num, h))
            tmp_previous_images = torch.reshape(previous_images.permute((0, 2, 1, 3, 4)),
                                                (batch, 3, w * previous_num, h))
            tmp_pre_backbone_outputs = torch.reshape(pre_backbone_outputs.permute((0, 2, 1, 3, 4)),
                                                     (batch, 3, w * previous_num, h))
            luts, lut_counts, _ = self.lut_generator(tmp_previous_masks,
                                                     tmp_previous_images,
                                                     tmp_pre_backbone_outputs)
            #luts, lut_counts = self.lut_generator.divide(luts, lut_counts)
            # Apply the LUT to the current frame; outside the mask keep the input.
            _, lut_output = self.trilinear(lut_counts, luts, image)
            lut_output = lut_output * mask + image * (1 - mask)
            # lut_map marks foreground pixels whose LUT cell had no support;
            # those fall back to the backbone output below (presumed semantics
            # of count_map — confirm in iharm.model.modeling.lut).
            lut_map = self.trilinear.count_map(lut_counts, image)
            lut_map = lut_map * mask
            lut_output = lut_output * (1 - lut_map) + lut_map * denormaled_cur_backbone_output
        else:
            lut_output = direct_lutoutput
            lut_output = lut_output * (1 - direct_lut_map) + direct_lut_map * denormaled_cur_backbone_output
        tmp_lut_output = lut_output
        #lut_output = self.normalize(lut_output)
        if self.need_normalize:
            lut_output = self.normalize(lut_output)
        # NOTE(review): t3/t4 are leftover timing probes, never used.
        t3 = time.time()
        final_output = self.refine_network(cur_backbone_output, lut_output, image, cur_backbone_feature)
        t4 = time.time()
        if self.need_denormalize:
            denormaled_final_output = self.mask_denormalize(final_output, mask, image)
        else:
            denormaled_final_output = final_output
        #print((denormaled_final_output - lut_output).sum())
        return {"images":denormaled_final_output, "backbone_out":denormaled_cur_backbone_output, "lut_output":tmp_lut_output}
| 9,579 | 46.192118 | 144 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/ssam_video_lut_withoutdbp.py | import torch
from functools import partial
from torch import nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
import os
import copy
from skimage.measure import compare_mse as mse
from iharm.model.base.rain.util.config import cfg
from iharm.model.base.rain.models.networks import RainNet
from iharm.model.base.rain.models.normalize import RAIN
from torchvision import transforms
from iharm.model.modeling.basic_blocks import ConvBlock, GaussianSmoothing
from iharm.model.modeling.unet import UNetEncoder, UNetDecoder
from iharm.model.ops import ChannelAttention
from iharm.model.modeling.dbp import NewRes, SimpleRefine
import time
from iharm.model.base.rain.util import util as rainutil
from iharm.utils.misc import load_weights
from iharm.model.base import SSAMImageHarmonization
from iharm.inference.transforms import NormalizeTensor, PadToDivisor, ToTensor, AddFlippedTensor
from iharm.model.modeling.lut import TrilinearInterpolation, TridistributeGeneraotr
# NOTE(review): hard-coded debug dump directory, not referenced anywhere in
# this file — presumably leftover from an experiment.
write_dir = '/home/ubuntu/tensors/2_2/'
class SSAMvideoLutWithoutDbp(nn.Module):
    """Video harmonization without the refinement (DBP) stage.

    A frozen per-frame backbone (iSSAM or RainNet) harmonizes the current and
    previous frames; the previous-frame results populate a 3D color LUT that is
    applied to the current frame.  Foreground pixels whose LUT cell has no
    support fall back to the backbone output.  With ``use_lutoutput=True`` a
    precomputed LUT output/map is consumed instead of rebuilding the LUT.
    """
    def __init__(
            self,
            depth, device = None, backbone_path = "", use_lutoutput = False, fix_threshold = 0.1, k_threshold = 1,
            write_lut_output = '', write_lut_map="", backbone_type = 'issam'
    ):
        """
        Parameters
        ----------
        depth : int
            Depth of the iSSAM U-Net backbone.
        device : torch.device, optional
            Resolved lazily from the first input tensor when None.
        backbone_path : str
            Checkpoint path for the frozen backbone.
        use_lutoutput : bool
            If True, forward() consumes direct_lutoutput/direct_lut_map.
        fix_threshold, k_threshold :
            Thresholds forwarded to TrilinearInterpolation.
        write_lut_output, write_lut_map : str
            When write_lut_output is non-empty, per-sample LUT outputs and maps
            are dumped to these directories as .npy files.
        backbone_type : str
            'issam' or 'rain'.
        """
        super(SSAMvideoLutWithoutDbp, self).__init__()
        self.use_lutoutput = use_lutoutput
        self.backbone_type = backbone_type
        # Normalization statistics depend on the backbone: iSSAM uses ImageNet
        # mean/std, RainNet uses 0.5/0.5.
        if self.backbone_type == 'issam':
            self.mean = torch.tensor([.485, .456, .406], dtype=torch.float32).view(1, 3, 1, 1)
            self.std = torch.tensor([.229, .224, .225], dtype=torch.float32).view(1, 3, 1, 1)
        elif self.backbone_type == 'rain':
            self.mean = torch.tensor([.5, .5, .5], dtype=torch.float32).view(1, 3, 1, 1)
            self.std = torch.tensor([.5, .5, .5], dtype=torch.float32).view(1, 3, 1, 1)
        self.depth = depth
        self.write_lut_output = write_lut_output
        self.write_lut_map = write_lut_map
        self.backbone_checkpoint = backbone_path
        self.device = device
        # Frozen harmonization backbone, restored from backbone_path.
        if self.backbone_type == 'issam':
            self.backbone = SSAMImageHarmonization(
                depth=self.depth, ch=32, image_fusion=True, attention_mid_k=0.5,
                attend_from=2, batchnorm_from=2)
            load_weights(self.backbone, self.backbone_checkpoint)
        elif self.backbone_type == 'rain':
            self.backbone = RainNet(input_nc=cfg.input_nc,
                                    output_nc=cfg.output_nc,
                                    ngf=cfg.ngf,
                                    norm_layer=RAIN,
                                    use_dropout=not cfg.no_dropout)
            state_dict = torch.load(self.backbone_checkpoint)
            rainutil.copy_state_dict(self.backbone.state_dict(), state_dict)
        self.lut_dim = 33
        self.lut_generator = TridistributeGeneraotr()
        self.trilinear = TrilinearInterpolation(fix_threshold, k_threshold)
    def init_device(self, input_device):
        # Lazily bind the mean/std buffers to the device of the first tensor seen.
        if self.device is None:
            self.device = input_device
            self.mean = self.mean.to(self.device)
            self.std = self.std.to(self.device)
    def mask_denormalize(self, tensor, mask, original):
        """Denormalize `tensor` inside `mask`; keep `original` elsewhere."""
        self.init_device(tensor.device)
        tmp_res = tensor * self.std + self.mean
        return tmp_res * mask + original * (1 - mask)
    def normalize(self, tensor):
        """Map an RGB tensor into the backbone's normalized space."""
        self.init_device(tensor.device)
        return (tensor - self.mean) / self.std
    def denormalize(self, tensor):
        """Inverse of normalize()."""
        self.init_device(tensor.device)
        return tensor * self.std + self.mean
    def train(self, mode=True):
        # Nothing trains here: the backbone always stays frozen in eval mode.
        self.backbone.eval()
    def eval(self):
        self.backbone.eval()
    def forward(self, image, mask, backbone_features=None, previous={}, names=[], direct_lutoutput=None, direct_lut_map=None):
        """Harmonize `image` inside `mask` using backbone + LUT.

        Returns a dict with 'images' / 'lut_output' (the blended result),
        'backbone_out', 'me' (foreground MSE of the LUT re-applied to previous
        frames vs. the backbone outputs) and 'invalid' (fraction of
        LUT-uncovered pixels).  `previous` holds stacked previous frames/masks,
        assumed shape (batch, seq, C, H, W) — TODO confirm with the loader.
        """
        # BUGFIX: these diagnostics were previously assigned only inside the
        # LUT-building branch, raising NameError at the return statement
        # whenever use_lutoutput=True.  (Dead timing scaffolding — unused
        # t0/t1 counters and torch.cuda.synchronize() calls, which crash on
        # CPU-only builds — was also removed.)
        total_me = 0
        invalid_rate = 0
        previous_images = previous['images']
        previous_masks = previous['masks']
        # Frozen backbone pass on the current frame (no gradients).
        normaled_images = self.normalize(image)
        with torch.no_grad():
            if self.backbone_type == 'issam':
                cur_backbone_result = self.backbone(normaled_images, mask)
                cur_backbone_output, cur_backbone_feature = cur_backbone_result['images'], cur_backbone_result['features']
            elif self.backbone_type == 'rain':
                cur_backbone_feature, cur_backbone_output = self.backbone.processImage(normaled_images, mask)
        denormaled_cur_backbone_output = self.mask_denormalize(cur_backbone_output, mask, image)
        if not self.use_lutoutput:
            # Harmonize every previous frame with the frozen backbone; these
            # (input, output) pairs populate the LUT.
            previous_num = previous_images.shape[1]
            pre_backbone_outputs = []
            for index in range(previous_num):
                normaled_previous_images = self.normalize(previous_images[:, index, :, :, :])
                with torch.no_grad():
                    if self.backbone_type == 'issam':
                        pre_backbone_result = self.backbone(normaled_previous_images, previous_masks[:, index, :, :, :])
                        normaled_pre_backbone_output = pre_backbone_result['images']
                    elif self.backbone_type == 'rain':
                        _, normaled_pre_backbone_output = self.backbone.processImage(
                            normaled_previous_images, previous_masks[:, index, :, :, :])
                pre_backbone_outputs.append(self.mask_denormalize(
                    normaled_pre_backbone_output, previous_masks[:, index, :, :, :],
                    previous_images[:, index, :, :, :]))
            pre_backbone_outputs = torch.stack(pre_backbone_outputs, dim=1)
            batch = image.shape[0]
            w = previous_masks.shape[-2]
            h = previous_masks.shape[-1]
            # Tile the sequence dimension along height so one LUT is built per
            # batch sample from all previous frames at once.
            tmp_previous_masks = torch.reshape(previous_masks.permute((0, 2, 1, 3, 4)),
                                               (batch, 1, w * previous_num, h))
            tmp_previous_images = torch.reshape(previous_images.permute((0, 2, 1, 3, 4)),
                                                (batch, 3, w * previous_num, h))
            tmp_pre_backbone_outputs = torch.reshape(pre_backbone_outputs.permute((0, 2, 1, 3, 4)),
                                                     (batch, 3, w * previous_num, h))
            luts, lut_counts, _ = self.lut_generator(tmp_previous_masks,
                                                     tmp_previous_images,
                                                     tmp_pre_backbone_outputs)
            # Apply the LUT to the current frame; outside the mask keep the input.
            _, lut_output = self.trilinear(lut_counts, luts, image)
            lut_output = lut_output * mask + image * (1 - mask)
            # lut_map marks foreground pixels whose LUT cell had no support.
            lut_map = self.trilinear.count_map(lut_counts, image)
            lut_map = lut_map * mask
            # Diagnostics: re-apply the LUT to the previous frames and compare
            # against the backbone outputs (foreground MSE).
            _, pre_lut_output = self.trilinear(lut_counts, luts, tmp_previous_images)
            pre_lut_map = self.trilinear.count_map(lut_counts, tmp_previous_images)
            pre_lut_map = pre_lut_map * tmp_previous_masks
            pre_lut_output = (1 - pre_lut_map) * pre_lut_output + pre_lut_map * tmp_pre_backbone_outputs
            pre_lut_output = pre_lut_output * tmp_previous_masks + (1 - tmp_previous_masks) * tmp_previous_images
            # NOTE(review): the next line is an identity blend (both terms are
            # tmp_pre_backbone_outputs); kept for behavioural parity.
            tmp_pre_backbone_outputs = tmp_pre_backbone_outputs * tmp_previous_masks + (1 - tmp_previous_masks) * tmp_pre_backbone_outputs
            for b in range(batch):
                lut_m = lut_map[b].detach().cpu().numpy()
                _, lw, lh = lut_m.shape
                invalid_rate += lut_m.sum() / lw / lh
            for b in range(batch):
                tmp_fore = tmp_previous_masks[b].detach().cpu().numpy().sum()
                pre_lut_output_single = torch.clamp(pre_lut_output[b] * 255, 0, 255)
                tmp_pre_backbone_output = torch.clamp(tmp_pre_backbone_outputs[b] * 255, 0, 255)
                _, w, h = pre_lut_output_single.shape
                total_me += mse(pre_lut_output_single.detach().cpu().numpy(),
                                tmp_pre_backbone_output.detach().cpu().numpy()) * w * h / tmp_fore
            lut_output = lut_output * (1 - lut_map) + lut_map * denormaled_cur_backbone_output
        else:
            # Precomputed LUT output/map supplied by the caller.
            lut_output = direct_lutoutput
            lut_map = direct_lut_map
        batch = image.shape[0]
        if len(self.write_lut_output) > 0:
            # Dump per-sample LUT outputs/maps for later reuse.
            for b in range(batch):
                video, obj, img_num = names[b].split('/')[-3:]
                new_name = video + '_' + obj + '_' + img_num[:-3] + 'npy'
                np.save(os.path.join(self.write_lut_output, new_name), lut_output[b].detach().cpu().numpy())
                np.save(os.path.join(self.write_lut_map, new_name), lut_map[b].detach().cpu().numpy())
        # NOTE(review): when the LUT was built above, this blend has already
        # been applied once inside the branch; it is idempotent only if
        # lut_map is binary — confirm against TrilinearInterpolation.count_map.
        lut_output = lut_output * (1 - lut_map) + lut_map * denormaled_cur_backbone_output
        lut_output = lut_output * mask + image * (1 - mask)
        return {"images": lut_output, "backbone_out": denormaled_cur_backbone_output,
                "lut_output": lut_output, "me": total_me, "invalid": invalid_rate}
| 9,471 | 44.538462 | 173 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/ssam_model.py | import torch
from functools import partial
from torch import nn as nn
import time
from iharm.model.modeling.basic_blocks import ConvBlock, GaussianSmoothing
from iharm.model.modeling.unet import UNetEncoder, UNetDecoder
from iharm.model.ops import ChannelAttention
class SSAMImageHarmonization(nn.Module):
    """U-Net image harmonization network with spatial-separated attention (iSSAM)."""
    def __init__(
        self,
        depth,
        norm_layer=nn.BatchNorm2d, batchnorm_from=2,
        attend_from=3, attention_mid_k=2.0,
        need_normalize = False,
        image_fusion=True,
        ch=64, max_channels=512,
        backbone_from=-1, backbone_channels=None, backbone_mode=''
    ):
        super(SSAMImageHarmonization, self).__init__()
        self.depth = depth
        self.need_normalize = need_normalize
        # Device is resolved lazily from the first input tensor.
        self.device = None
        # ImageNet statistics used by the (de)normalization helpers.
        self.mean = torch.tensor([.485, .456, .406], dtype=torch.float32).view(1, 3, 1, 1)
        self.std = torch.tensor([.229, .224, .225], dtype=torch.float32).view(1, 3, 1, 1)
        attention = partial(SpatialSeparatedAttention, mid_k=attention_mid_k)
        self.encoder = UNetEncoder(
            depth, ch,
            norm_layer, batchnorm_from, max_channels,
            backbone_from, backbone_channels, backbone_mode
        )
        self.decoder = UNetDecoder(
            depth, self.encoder.block_channels,
            norm_layer,
            attention_layer=attention,
            attend_from=attend_from,
            image_fusion=image_fusion
        )
    def init_device(self, input_device):
        """Bind the mean/std buffers to the device of the first tensor seen."""
        if self.device is not None:
            return
        self.device = input_device
        self.mean = self.mean.to(input_device)
        self.std = self.std.to(input_device)
    def mask_denormalize(self, tensor, mask, original):
        """Denormalize ``tensor`` inside ``mask``; keep ``original`` elsewhere."""
        self.init_device(tensor.device)
        restored = tensor * self.std + self.mean
        return mask * restored + (1 - mask) * original
    def normalize(self, tensor):
        """Map an RGB tensor into the normalized (ImageNet-stats) space."""
        self.init_device(tensor.device)
        return (tensor - self.mean) / self.std
    def denormalize(self, tensor):
        """Inverse of :meth:`normalize`."""
        self.init_device(tensor.device)
        return self.std * tensor + self.mean
    def forward(self, image, mask, backbone_features=None):
        """Harmonize ``image`` inside ``mask``.

        Returns a dict with the output 'images' and the decoder's attention
        'features' map.
        """
        if self.need_normalize:
            image = self.normalize(image)
        net_input = torch.cat((image, mask), dim=1)
        skips = self.encoder(net_input, backbone_features)
        output, output_map = self.decoder(skips, image, mask)
        if self.need_normalize:
            output = self.mask_denormalize(output, mask, image)
        return {"images": output, "features": output_map}
class SpatialSeparatedAttention(nn.Module):
    """Attention block that gates features separately for the background,
    foreground and a shared 'mix' path, blended by a blurred soft mask."""
    def __init__(self, in_channels, norm_layer, activation, mid_k=2.0):
        super(SpatialSeparatedAttention, self).__init__()
        self.background_gate = ChannelAttention(in_channels)
        self.foreground_gate = ChannelAttention(in_channels)
        self.mix_gate = ChannelAttention(in_channels)
        hidden = int(mid_k * in_channels)
        conv_kwargs = dict(
            kernel_size=3, stride=1, padding=1,
            norm_layer=norm_layer, activation=activation,
            bias=False,
        )
        # Bottleneck applied to the gated foreground features only.
        self.learning_block = nn.Sequential(
            ConvBlock(in_channels, hidden, **conv_kwargs),
            ConvBlock(hidden, in_channels, **conv_kwargs),
        )
        self.mask_blurring = GaussianSmoothing(1, 7, 1, padding=3)
    def forward(self, x, mask):
        # Resize the mask to the feature resolution and soften its edges.
        resized = nn.functional.interpolate(
            mask, size=x.size()[-2:],
            mode='bilinear', align_corners=True
        )
        soft_mask = self.mask_blurring(resized)
        bg = self.background_gate(x)
        fg = self.learning_block(self.foreground_gate(x))
        mix = self.mix_gate(x)
        return soft_mask * (fg + mix) + (1 - soft_mask) * bg
| 3,972 | 36.130841 | 90 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/rain/models/base_model.py | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
    """Abstract base class (ABC) for models.

    To create a subclass, you need to implement the following five functions:
        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
        -- <set_input>: unpack data from dataset and apply preprocessing.
        -- <forward>: produce intermediate results.
        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
    """
    def __init__(self, opt):
        """Initialize the BaseModel class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>.
        Then, you need to define four lists:
            -- self.loss_names (str list): names of the training losses to plot and save
               (each name N must have a matching attribute self.loss_N).
            -- self.model_names (str list): names of the networks used in training
               (each name N must have a matching attribute self.netN; used by
               save_networks / load_networks / print_networks below).
            -- self.visual_names (str list): names of the images to display and save.
            -- self.optimizers (optimizer list): one optimizer per network. If two networks
               are updated at the same time, you can use itertools.chain to group them.
               See cycle_gan_model.py for an example.
        """
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.optimizers = []
        self.image_paths = []
        self.metric = 0  # used for learning rate policy 'plateau'
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new model-specific options, and rewrite default values for existing options.

        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser
    @abstractmethod
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): includes the data itself and its metadata information.
        """
        pass
    @abstractmethod
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        pass
    @abstractmethod
    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration."""
        pass
    def setup(self, opt):
        """Load and print networks; create schedulers.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if not self.isTrain or opt.continue_train:
            # load_iter > 0 selects an iteration checkpoint; otherwise an epoch checkpoint.
            load_suffix = '%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
            self.load_networks(load_suffix)
        self.print_networks(opt.verbose)
    def eval(self):
        """Make models eval mode during test time."""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                net.eval()
    def test(self):
        """Forward function used in test time.

        This function wraps <forward> in no_grad() so we don't save intermediate steps for backprop.
        It also calls <compute_visuals> to produce additional visualization results.
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()
    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization."""
        pass
    def get_image_paths(self):
        """Return image paths that are used to load current data."""
        return self.image_paths
    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch."""
        for scheduler in self.schedulers:
            if self.opt.lr_policy == 'plateau':
                # ReduceLROnPlateau steps on the monitored metric (self.metric).
                scheduler.step(self.metric)
            else:
                scheduler.step()
        #lr = self.optimizers[0].param_groups[0]['lr']
        #print('learning rate = %.7f' % lr)
    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML."""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret
    def get_current_losses(self):
        """Return training losses / errors. train.py will print out these errors on console, and save them to a file."""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
        return errors_ret
    def save_networks(self, epoch):
        """Save all the networks to the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_net_%s.pth' % (epoch, name)
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, 'net' + name)
                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    # Save the unwrapped module's weights from CPU, then move the net back to GPU.
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)
    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4).

        Walks the dotted key path alongside the module tree; at the leaf, drops
        stale InstanceNorm 'running_mean'/'running_var' (when the module holds
        None) and 'num_batches_tracked' entries from the state dict.
        """
        key = keys[i]
        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'running_mean' or key == 'running_var'):
                if getattr(module, key) is None:
                    state_dict.pop('.'.join(keys))
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'num_batches_tracked'):
                state_dict.pop('.'.join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
    def load_networks(self, epoch):
        """Load all the networks from the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = '%s_net_%s.pth' % (epoch, name)
                load_path = os.path.join(self.save_dir, load_filename)
                net = getattr(self, 'net' + name)
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print('loading the model from %s' % load_path)
                # if you are using PyTorch newer than 0.4 (e.g., built from
                # GitHub source), you can remove str() on self.device
                state_dict = torch.load(load_path, map_location=str(self.device))
                if hasattr(state_dict, '_metadata'):
                    del state_dict._metadata
                # patch InstanceNorm checkpoints prior to 0.4
                for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict, strict=False)
    def print_networks(self, verbose):
        """Print the total number of parameters in the network and (if verbose) network architecture.

        Parameters:
            verbose (bool) -- if verbose: print the network architecture
        """
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
        print('-----------------------------------------------')
    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations.

        Parameters:
            nets (network list) -- a list of networks
            requires_grad (bool) -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad
| 10,345 | 43.787879 | 260 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/rain/models/rainnet_model.py | import torch
from .base_model import BaseModel
from . import networks
import torch.nn.functional as F
from torch import nn, cuda
from torch.autograd import Variable
class RainNetModel(BaseModel):
    """Adversarial harmonization model: RainNet generator + NLayer discriminator."""
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake', 'D_gp', 'D_global', 'D_local', 'G_global', 'G_local']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['comp', 'real', 'output', 'mask', 'real_f', 'fake_f', 'bg', 'attentioned']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:
            # at test time, only the generator is needed
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        # used for the hinge-style real/fake terms in backward_D when gan_mode == 'wgangp'
        self.relu = nn.ReLU()
        if self.isTrain:
            self.gan_mode = opt.gan_mode
            netD = networks.NLayerDiscriminator(opt.output_nc, opt.ndf, opt.n_layers_D, networks.get_norm_layer(opt.normD))
            self.netD = networks.init_net(netD, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr*opt.g_lr_ratio, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr*opt.d_lr_ratio, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
        self.iter_cnt = 0
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): include the data itself and its metadata information.
        """
        self.comp = input['comp'].to(self.device)
        self.real = input['real'].to(self.device)
        self.mask = input['mask'].to(self.device)
        self.inputs = self.comp
        if self.opt.input_nc == 4:
            self.inputs = torch.cat([self.inputs, self.mask], 1)  # channel-wise concatenation
        self.real_f = self.real * self.mask  # real foreground region
        self.bg = self.real * (1 - self.mask)  # real background region
    def forward(self):
        # NOTE(review): networks.RainNet.forward returns (features, out) — confirm the
        # generator wired in here returns a single tensor, otherwise self.output is a tuple.
        self.output = self.netG(self.inputs, self.mask)
        self.fake_f = self.output * self.mask
        # composite: generator output inside the mask, original composite pixels outside
        self.attentioned = self.output * self.mask + self.inputs[:,:3,:,:] * (1 - self.mask)
        self.harmonized = self.attentioned
    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; detach so no gradients flow back into the generator
        fake_AB = self.harmonized
        pred_fake, ver_fake = self.netD(fake_AB.detach(), self.mask)
        if self.gan_mode == 'wgangp':
            # hinge-style terms for the WGAN-GP mode
            global_fake = self.relu(1 + pred_fake).mean()
            local_fake = self.relu(1 + ver_fake).mean()
        else:
            global_fake = self.criterionGAN(pred_fake, False)
            local_fake = self.criterionGAN(ver_fake, False)
        self.loss_D_fake = global_fake + local_fake
        # Real
        real_AB = self.real
        pred_real, ver_real = self.netD(real_AB, self.mask)
        if self.gan_mode == 'wgangp':
            global_real = self.relu(1 - pred_real).mean()
            local_real = self.relu(1 - ver_real).mean()
        else:
            global_real = self.criterionGAN(pred_real, True)
            local_real = self.criterionGAN(ver_real, True)
        self.loss_D_real = global_real + local_real
        # bookkeeping splits for logging (see self.loss_names)
        self.loss_D_global = global_fake + global_real
        self.loss_D_local = local_fake + local_real
        gradient_penalty, gradients = networks.cal_gradient_penalty(self.netD, real_AB.detach(), fake_AB.detach(),
                                                                    'cuda', mask=self.mask)
        self.loss_D_gp = gradient_penalty
        # combine loss and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real + self.opt.gp_ratio * gradient_penalty)
        self.loss_D.backward(retain_graph=True)
    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        fake_AB = self.harmonized
        pred_fake, ver_fake, featg_fake, featl_fake = self.netD(fake_AB, self.mask, feat_loss=True)
        self.loss_G_global = self.criterionGAN(pred_fake, True)
        self.loss_G_local = self.criterionGAN(ver_fake, True)
        self.loss_G_GAN =self.opt.lambda_a * self.loss_G_global + self.opt.lambda_v * self.loss_G_local
        # reconstruction loss against the ground-truth real image
        self.loss_G_L1 = self.criterionL1(self.attentioned, self.real) * self.opt.lambda_L1
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward(retain_graph=True)
    def optimize_parameters(self):
        self.forward()
        # update D
        self.set_requires_grad(self.netD, True)  # enable backprop for D
        self.optimizer_D.zero_grad()     # set D's gradients to zero
        self.backward_D()                # calculate gradients for D
        self.optimizer_D.step()          # update D's weights
        # update G
        self.set_requires_grad(self.netD, False)  # D requires no gradients when optimizing G
        self.optimizer_G.zero_grad()        # set G's gradients to zero
        self.backward_G()                   # calculate gradients for G
        self.optimizer_G.step()             # update G's weights
| 5,980 | 47.626016 | 150 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/rain/models/networks.py | import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import functools
from torch.optim import lr_scheduler
from iharm.model.base.rain.models.normalize import RAIN
from torch.nn.utils import spectral_norm
class Identity(nn.Module):
    """No-op module: returns its input unchanged (used when norm_type == 'none')."""
    def forward(self, x):
        # Pass-through; keeps network definitions uniform when no normalization is wanted.
        return x
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer constructor for the given type name.

    Parameters:
        norm_type (str) -- batch | instance | none | rain* (case-insensitive)

    'batch' uses learnable affine parameters and tracks running statistics
    (mean/stddev); 'instance' uses neither. 'none' yields an identity layer
    and any name starting with 'rain' yields the RAIN layer class.

    Raises:
        NotImplementedError -- for any other name.
    """
    key = norm_type.lower()
    if key == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if key == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if key == 'none':
        return lambda x: Identity()
    if key.startswith('rain'):
        return RAIN
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False,
             init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Build and initialize a generator.

    Parameters:
        input_nc (int) -- the number of channels in input images
        output_nc (int) -- the number of channels in output images
        ngf (int) -- the number of filters in the last conv layer
        netG (str) -- the architecture's name; only 'rainnet' is supported
        norm (str) -- the name of normalization layers used in the network: batch | instance | none
        use_dropout (bool) -- if use dropout layers.
        init_type (str) -- the name of our initialization method.
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Raises:
        NotImplementedError -- for any other architecture name.
    """
    # Fail fast on unknown architectures before building anything.
    if netG != 'rainnet':
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    norm_layer = get_norm_layer(norm_type=norm)
    generator = RainNet(input_nc, output_nc, ngf, norm_layer=norm_layer,
                        use_dropout=use_dropout, use_attention=True)
    return init_net(generator, init_type, init_gain, gpu_ids)
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Build and initialize a discriminator.

    Parameters:
        input_nc (int) -- the number of channels in input images
        ndf (int) -- the number of filters in the first conv layer
        netD (str) -- the architecture's name: basic | n_layers | pixel
        n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD == 'n_layers'
        norm (str) -- the type of normalization layers used in the network.
        init_type (str) -- the name of the initialization method.
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Raises:
        NotImplementedError -- for any other architecture name.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    # Lazy builders: only the selected architecture is instantiated.
    builders = {
        'basic': lambda: NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer),      # default PatchGAN classifier
        'n_layers': lambda: NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer),   # configurable depth
        'pixel': lambda: PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer),                   # per-pixel real/fake
    }
    if netD not in builders:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(builders[netD](), init_type, init_gain, gpu_ids)
def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler.

    Parameters:
        optimizer -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.niter> epochs
    and linearly decay the rate to zero over the next <opt.niter_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.

    Raises:
        NotImplementedError -- if opt.lr_policy is not one of the supported policies.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Constant multiplier 1.0 up to epoch `niter`, then linear decay to 0
            # over the following `niter_decay` epochs.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # Bug fix: this previously *returned* an un-raised NotImplementedError instance,
        # with the policy name passed as a second argument instead of %-interpolated,
        # so callers silently received an exception object as their "scheduler".
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize network weights in place.

    Parameters:
        net (network) -- network to be initialized
        init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and
    kaiming might work better for some applications. Feel free to try yourself.
    """
    def _initialize(module):
        # Dispatch on the class name, as in the original pix2pix codebase.
        cls_name = module.__class__.__name__
        if hasattr(module, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name):
            if init_type == 'normal':
                init.normal_(module.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(module.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(module.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(module.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if getattr(module, 'bias', None) is not None:
                init.constant_(module.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name:
            # BatchNorm's weight is not a matrix; only a normal distribution applies.
            init.normal_(module.weight.data, 1.0, init_gain)
            init.constant_(module.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(_initialize)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Prepare a network for use: place it on device(s) and initialize its weights.

    Parameters:
        net (network) -- the network to be initialized
        init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns:
        the initialized network (wrapped in DataParallel when GPUs are given).
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        # Replicate across all requested GPUs.
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
class GANLoss(nn.Module):
    """GAN objective wrapper.

    Hides the creation of a target-label tensor shaped like the discriminator
    prediction, so callers just pass (prediction, is_real). Supported modes:
    'lsgan' (MSE), 'vanilla' (BCE-with-logits), 'wgangp' (signed mean, no
    target tensor).

    Note: do not put a sigmoid at the end of the discriminator. LSGAN needs
    no sigmoid; 'vanilla' applies it internally via BCEWithLogitsLoss.
    """
    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Store label values as buffers and pick the underlying criterion.

        Parameters:
            gan_mode (str) - - 'lsgan' | 'vanilla' | 'wgangp'
            target_real_label (bool) - - label for a real image
            target_fake_label (bool) - - label of a fake image

        Raises:
            NotImplementedError - - for any other mode.
        """
        super(GANLoss, self).__init__()
        # Buffers follow the module across devices but are not trained.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        criterion_by_mode = {'lsgan': nn.MSELoss, 'vanilla': nn.BCEWithLogitsLoss}
        if gan_mode in criterion_by_mode:
            self.loss = criterion_by_mode[gan_mode]()
        elif gan_mode == 'wgangp':
            self.loss = None
            self.relu = nn.ReLU()
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)
    def get_target_tensor(self, prediction, target_is_real):
        """Return a label tensor shaped like `prediction`, filled with the real/fake label value."""
        label = self.real_label if target_is_real else self.fake_label
        return label.expand_as(prediction)
    def __call__(self, prediction, target_is_real):
        """Calculate the loss given the discriminator's output and ground-truth labels.

        Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
            target_is_real (bool) - - if the ground truth label is for real images or fake images

        Returns:
            the calculated scalar loss tensor.
        """
        if self.gan_mode == 'wgangp':
            mean_pred = prediction.mean()
            return -mean_pred if target_is_real else mean_pred
        target = self.get_target_tensor(prediction, target_is_real)
        return self.loss(prediction, target)
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0, mask=None):
    """Calculate the WGAN-GP gradient penalty (https://arxiv.org/abs/1704.00028).

    Arguments:
        netD (network) -- discriminator network
        real_data (tensor array) -- real images
        fake_data (tensor array) -- generated images from the generator
        device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        type (str) -- where to evaluate the gradient: real | fake | mixed (random interpolation)
        constant (float) -- the constant used in the formula (||gradient||_2 - constant)^2
        lambda_gp (float) -- weight for this loss; <= 0 disables the penalty
        mask (tensor) -- forwarded to netD alongside the samples

    Returns:
        (penalty, gradients) -- scalar penalty and flattened per-sample gradients,
        or (0.0, None) when lambda_gp is not positive.
    """
    if not lambda_gp > 0.0:
        return 0.0, None
    # Pick the points at which the discriminator's gradient is evaluated.
    if type == 'real':
        samples = real_data
    elif type == 'fake':
        samples = fake_data
    elif type == 'mixed':
        # Per-sample random blend between real and fake.
        alpha = torch.rand(real_data.shape[0], 1, device=device)
        alpha = alpha.unsqueeze(2).unsqueeze(3).expand_as(real_data)
        samples = alpha * real_data + ((1 - alpha) * fake_data)
    else:
        raise NotImplementedError('{} not implemented'.format(type))
    samples.requires_grad_(True)
    disc_out = netD(samples, mask, gp=True)
    grad_list = torch.autograd.grad(outputs=disc_out, inputs=samples,
                                    grad_outputs=torch.ones(disc_out.size()).to(device),
                                    create_graph=True, retain_graph=True, only_inputs=True,
                                    allow_unused=True)
    flat_grads = grad_list[0].view(real_data.size(0), -1)  # one row per sample
    # eps keeps norm() differentiable at exactly zero gradients
    penalty = (((flat_grads + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp
    return penalty, flat_grads
def get_act_conv(act, dims_in, dims_out, kernel, stride, padding, bias):
    """Return an activation followed by a Conv2d, packed in an nn.Sequential."""
    return nn.Sequential(
        act,
        nn.Conv2d(dims_in, dims_out, kernel_size=kernel, stride=stride, padding=padding, bias=bias),
    )
def get_act_dconv(act, dims_in, dims_out, kernel, stride, padding, bias):
    """Return an activation followed by a ConvTranspose2d, packed in an nn.Sequential.

    Bug fix: `stride`, `padding` and `bias` were previously ignored (hard-coded
    to 2, 1 and False); they are now honored, mirroring get_act_conv. Every
    call site in this file passes exactly (…, 4, 2, 1, False), so behavior is
    unchanged for existing callers.
    """
    return nn.Sequential(
        act,
        nn.ConvTranspose2d(dims_in, dims_out, kernel_size=kernel, stride=stride, padding=padding, bias=bias),
    )
class RainNet(nn.Module):
    """U-Net-style harmonization generator with selectable normalization per site.

    `norm_type_indicator[i]` chooses, for the i-th normalization site, between
    plain InstanceNorm (0) and the supplied `norm_layer` (1, RAIN by default);
    with the default indicator the encoder sites use InstanceNorm and the
    decoder sites use `norm_layer`. Norm layers whose class name is listed in
    `self.norm_namebuffer` receive the foreground mask as an extra argument.

    forward() returns (features, out): `features` is the 2x-upsampled final
    decoder feature map and `out` is the Tanh-activated image prediction.
    """
    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=RAIN,
                 norm_type_indicator=[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
                 use_dropout=False, use_attention=True):
        # NOTE(review): mutable default list `norm_type_indicator` is only read,
        # never mutated, so the shared-default pitfall does not bite here.
        super(RainNet, self).__init__()
        self.input_nc = input_nc
        # Norm classes whose forward() takes (features, mask).
        self.norm_namebuffer = ['RAIN']
        self.use_dropout = use_dropout
        self.use_attention = use_attention
        # Index 0 -> InstanceNorm, index 1 -> the configurable norm_layer.
        norm_type_list = [get_norm_layer('instance'), norm_layer]
        # -------------------------------Network Settings-------------------------------------
        # Encoder: three strided conv stages (plus the stem model_layer0).
        self.model_layer0 = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1, bias=False)
        self.model_layer1 = get_act_conv(nn.LeakyReLU(0.2, True), ngf, ngf*2, 4, 2, 1, False)
        self.model_layer1norm = norm_type_list[norm_type_indicator[0]](ngf*2)
        self.model_layer2 = get_act_conv(nn.LeakyReLU(0.2, True), ngf*2, ngf*4, 4, 2, 1, False)
        self.model_layer2norm = norm_type_list[norm_type_indicator[1]](ngf*4)
        self.model_layer3 = get_act_conv(nn.LeakyReLU(0.2, True), ngf*4, ngf*8, 4, 2, 1, False)
        self.model_layer3norm = norm_type_list[norm_type_indicator[2]](ngf*8)
        # Bottleneck: four nested U-Net blocks (innermost built first).
        unet_block = UnetBlockCodec(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer,
                                    innermost=True, enc=norm_type_indicator[6], dec=norm_type_indicator[7])  # add the innermost layer
        unet_block = UnetBlockCodec(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer,
                                    use_dropout=use_dropout, enc=norm_type_indicator[5], dec=norm_type_indicator[8])
        unet_block = UnetBlockCodec(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer,
                                    use_dropout=use_dropout, enc=norm_type_indicator[4], dec=norm_type_indicator[9])
        self.unet_block = UnetBlockCodec(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer,
                                         use_dropout=use_dropout, enc=norm_type_indicator[3], dec=norm_type_indicator[10])
        # Decoder: transposed convs with skip concatenation and optional 1x1 attention gates.
        self.model_layer11 = get_act_dconv(nn.ReLU(True), ngf*16, ngf*4, 4, 2, 1, False)
        self.model_layer11norm = norm_type_list[norm_type_indicator[11]](ngf*4)
        if use_attention:
            self.model_layer11att = nn.Sequential(nn.Conv2d(ngf*8, ngf*8, kernel_size=1, stride=1), nn.Sigmoid())
        self.model_layer12 = get_act_dconv(nn.ReLU(True), ngf*8, ngf*2, 4, 2, 1, False)
        self.model_layer12norm = norm_type_list[norm_type_indicator[12]](ngf*2)
        if use_attention:
            self.model_layer12att = nn.Sequential(nn.Conv2d(ngf*4, ngf*4, kernel_size=1, stride=1), nn.Sigmoid())
        self.model_layer13 = get_act_dconv(nn.ReLU(True), ngf*4, ngf, 4, 2, 1, False)
        self.model_layer13norm = norm_type_list[norm_type_indicator[13]](ngf)
        if use_attention:
            self.model_layer13att = nn.Sequential(nn.Conv2d(ngf*2, ngf*2, kernel_size=1, stride=1), nn.Sigmoid())
        self.upsample = torch.nn.Upsample(scale_factor = 2, mode = 'bilinear')
        self.model_out = nn.Sequential(nn.ReLU(True), nn.ConvTranspose2d(ngf * 2, output_nc, kernel_size=4, stride=2, padding=1), nn.Tanh())
    def forward(self, x, mask):
        """Run the generator; mask-aware norms receive `mask`, plain norms do not.

        Returns:
            (features, out) -- upsampled final decoder features and the Tanh image prediction.
        """
        # Encoder.
        x0 = self.model_layer0(x)
        x1 = self.model_layer1(x0)
        if self.model_layer1norm._get_name() in self.norm_namebuffer:
            x1 = self.model_layer1norm(x1, mask)
        else:
            x1 = self.model_layer1norm(x1)
        x2 = self.model_layer2(x1)
        if self.model_layer2norm._get_name() in self.norm_namebuffer:
            x2 = self.model_layer2norm(x2, mask)
        else:
            x2 = self.model_layer2norm(x2)
        x3 = self.model_layer3(x2)
        if self.model_layer3norm._get_name() in self.norm_namebuffer:
            x3 = self.model_layer3norm(x3, mask)
        else:
            x3 = self.model_layer3norm(x3)
        # Bottleneck.
        ox3 = self.unet_block(x3, mask)
        # Decoder with skip connections (concat encoder feature, gate with attention).
        ox2 = self.model_layer11(ox3)
        if self.model_layer11norm._get_name() in self.norm_namebuffer:
            ox2 = self.model_layer11norm(ox2, mask)
        else:
            ox2 = self.model_layer11norm(ox2)
        ox2 = torch.cat([x2, ox2], 1)
        if self.use_attention:
            ox2 = self.model_layer11att(ox2) * ox2
        ox1 = self.model_layer12(ox2)
        if self.model_layer12norm._get_name() in self.norm_namebuffer:
            ox1 = self.model_layer12norm(ox1, mask)
        else:
            ox1 = self.model_layer12norm(ox1)
        ox1 = torch.cat([x1, ox1], 1)
        if self.use_attention:
            ox1 = self.model_layer12att(ox1) * ox1
        ox0 = self.model_layer13(ox1)
        if self.model_layer13norm._get_name() in self.norm_namebuffer:
            ox0 = self.model_layer13norm(ox0, mask)
        else:
            ox0 = self.model_layer13norm(ox0)
        ox0 = torch.cat([x0, ox0], 1)
        if self.use_attention:
            ox0 = self.model_layer13att(ox0) * ox0
        features = self.upsample(ox0)
        out = self.model_out(ox0)
        return features, out
    def processImage(self, x, mask, background=None):
        """Harmonize `x` inside `mask` and composite over the untouched background.

        If `background` is given, its pixels replace x outside the mask first.
        Returns (feature, composited_prediction).
        """
        if background is not None:
            x = x*mask + background * (1 - mask)
        if self.input_nc == 4:
            x = torch.cat([x, mask], dim=1)  # (bs, 4, 256, 256)
        feature, pred = self.forward(x, mask)
        # Only the masked (foreground) region takes the prediction.
        return feature, pred * mask + x[:,:3,:,:] * (1 - mask)
class UnetBlockCodec(nn.Module):
    """Defines the Unet submodule with skip connection.
        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """
    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False,
                 norm_layer=RAIN, use_dropout=False, use_attention=False, enc=True, dec=True):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetBlockCodec) -- previously defined submodules
            outermost (bool) -- if this module is the outermost module
            innermost (bool) -- if this module is the innermost module
            norm_layer -- normalization layer
            use_dropout (bool) -- if use dropout layers.
            enc (bool) -- if truthy, use the given norm_layer in the encoder part (else InstanceNorm).
            dec (bool) -- if truthy, use the given norm_layer in the decoder part (else InstanceNorm).
        """
        super(UnetBlockCodec, self).__init__()
        self.outermost = outermost
        self.innermost = innermost
        self.use_dropout = use_dropout
        self.use_attention = use_attention
        use_bias = False
        if input_nc is None:
            input_nc = outer_nc
        # Norm classes whose forward() takes (features, mask) as arguments.
        self.norm_namebuffer = ['RAIN', 'RAIN_Method_Learnable', 'RAIN_Method_BN']
        if outermost:
            self.down = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            self.submodule = submodule
            self.up = nn.Sequential(
                nn.ReLU(True),
                nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1),
                nn.Tanh()
            )
        elif innermost:
            # Innermost block has no submodule: `up` bundles the down-conv and up-conv.
            self.up = nn.Sequential(
                nn.LeakyReLU(0.2, True),
                nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias),
                nn.ReLU(True),
                nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            )
            self.upnorm = norm_layer(outer_nc) if dec else get_norm_layer('instance')(outer_nc)
        else:
            self.down = nn.Sequential(
                nn.LeakyReLU(0.2, True),
                nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias),
            )
            self.downnorm = norm_layer(inner_nc) if enc else get_norm_layer('instance')(inner_nc)
            self.submodule = submodule
            self.up = nn.Sequential(
                nn.ReLU(True),
                nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias),
            )
            self.upnorm = norm_layer(outer_nc) if dec else get_norm_layer('instance')(outer_nc)
        if use_dropout:
            self.dropout = nn.Dropout(0.5)
        if use_attention:
            # 1x1 conv + sigmoid gate applied after the skip concatenation.
            attention_conv = nn.Conv2d(outer_nc+input_nc, outer_nc+input_nc, kernel_size=1)
            attention_sigmoid = nn.Sigmoid()
            self.attention = nn.Sequential(*[attention_conv, attention_sigmoid])
    def forward(self, x, mask):
        # mask is forwarded into every mask-aware norm layer and into the submodule.
        if self.outermost:
            x = self.down(x)
            x = self.submodule(x, mask)
            ret = self.up(x)
            return ret
        elif self.innermost:
            ret = self.up(x)
            if self.upnorm._get_name() in self.norm_namebuffer:
                ret = self.upnorm(ret, mask)
            else:
                ret = self.upnorm(ret)
            ret = torch.cat([x, ret], 1)  # skip connection
            if self.use_attention:
                return self.attention(ret) * ret
            return ret
        else:
            ret = self.down(x)
            if self.downnorm._get_name() in self.norm_namebuffer:
                ret = self.downnorm(ret, mask)
            else:
                ret = self.downnorm(ret)
            ret = self.submodule(ret, mask)
            ret = self.up(ret)
            if self.upnorm._get_name() in self.norm_namebuffer:
                ret = self.upnorm(ret, mask)
            else:
                ret = self.upnorm(ret)
            if self.use_dropout:  # only works for middle features
                ret = self.dropout(ret)
            ret = torch.cat([x, ret], 1)  # skip connection
            if self.use_attention:
                return self.attention(ret) * ret
            return ret
class PixelDiscriminator(nn.Module):
    """A 1x1 PatchGAN discriminator (pixelGAN).

    Every output value classifies a single input pixel as real/fake, so the
    effective receptive field of the network is exactly one pixel.
    """

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Build the 1x1 PatchGAN classifier.

        Parameters:
            input_nc (int) -- the number of channels in input images
            ndf (int)      -- the number of filters in the first conv layer
            norm_layer     -- normalization layer (class or functools.partial)
        """
        super(PixelDiscriminator, self).__init__()
        # BatchNorm2d has affine parameters, so a conv bias would be
        # redundant; only use bias when normalizing with InstanceNorm.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        self.net = nn.Sequential(
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        )

    def forward(self, input):
        """Return the per-pixel real/fake score map."""
        return self.net(input)
class PartialConv2d(nn.Conv2d):
    """Conv2d that convolves only over valid (mask == 1) pixels.

    Partial convolution (Liu et al., "Image Inpainting for Irregular Holes
    Using Partial Convolutions"): the raw convolution output is rescaled by
    window_size / (number of valid pixels under the kernel), and an updated
    binary mask is produced alongside the features.
    """
    def __init__(self, *args, **kwargs):
        # whether the mask is multi-channel or not
        if 'multi_channel' in kwargs:
            self.multi_channel = kwargs['multi_channel']
            kwargs.pop('multi_channel')
        else:
            self.multi_channel = False
        # forward() always returns (features, updated_mask)
        self.return_mask = True
        super(PartialConv2d, self).__init__(*args, **kwargs)
        # All-ones kernel used (with conv2d) to count valid pixels per window.
        if self.multi_channel:
            self.weight_maskUpdater = torch.ones(self.out_channels, self.in_channels, self.kernel_size[0],
                                                 self.kernel_size[1])
        else:
            self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0], self.kernel_size[1])
        # Number of elements in one sliding window (C * kH * kW).
        self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] * \
                             self.weight_maskUpdater.shape[3]
        # Cache: if the input size is unchanged and no explicit mask is given,
        # the mask tensors from the previous call are reused.
        self.last_size = (None, None, None, None)
        self.update_mask = None
        self.mask_ratio = None
    def forward(self, input, mask_in=None):
        """Apply the partial convolution.

        Parameters:
            input   -- (B, C, H, W) feature tensor
            mask_in -- optional binary mask (1 = valid); if None, an all-ones
                       mask is assumed (and cached tensors are reused when the
                       input size has not changed)

        Returns (output, update_mask) because self.return_mask is always True.
        """
        assert len(input.shape) == 4
        if mask_in is not None or self.last_size != tuple(input.shape):
            self.last_size = tuple(input.shape)
            with torch.no_grad():
                if self.weight_maskUpdater.type() != input.type():
                    self.weight_maskUpdater = self.weight_maskUpdater.to(input)
                if mask_in is None:
                    # if mask is not provided, create a mask
                    if self.multi_channel:
                        mask = torch.ones(input.data.shape[0], input.data.shape[1], input.data.shape[2],
                                          input.data.shape[3]).to(input)
                    else:
                        mask = torch.ones(1, 1, input.data.shape[2], input.data.shape[3]).to(input)
                else:
                    mask = mask_in
                # Count valid input pixels under each kernel window.
                self.update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride,
                                            padding=self.padding, dilation=self.dilation, groups=1)
                # Rescaling factor: full window size / valid-pixel count.
                self.mask_ratio = self.slide_winsize / (self.update_mask + 1e-8)
                # A window with at least one valid pixel stays valid (mask=1).
                self.update_mask = torch.clamp(self.update_mask, 0, 1)
                self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
        # Convolve the masked input (or the raw input when no mask was given).
        raw_out = super(PartialConv2d, self).forward(torch.mul(input, mask) if mask_in is not None else input)
        if self.bias is not None:
            # The bias must not be rescaled: remove it, rescale, then add it back.
            bias_view = self.bias.view(1, self.out_channels, 1, 1)
            output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
            output = torch.mul(output, self.update_mask)
        else:
            output = torch.mul(raw_out, self.mask_ratio)
        if self.return_mask:
            return output, self.update_mask
        else:
            return output
class OrgDiscriminator(nn.Module):
    """Two-branch PatchGAN backbone.

    The main branch (conv1..conv7) processes the whole image without a mask.
    A second branch of partial convolutions (conv1f..conv7f) is run twice:
    once on the foreground (mask) and once on the background (1 - mask).
    `global_stages` controls how many leading stages of the second branch
    share weights with the main branch (stage i shares when global_stages >= i;
    stages 4+ are always separate).
    """
    def __init__(self, input_nc, ndf=64, n_layers=6, norm_layer=nn.BatchNorm2d, global_stages=0):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer
        """
        super(OrgDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw = 3
        padw = 0
        # Stage 1: input_nc -> ndf, stride 2 (no normalization at this stage).
        self.conv1 = spectral_norm(PartialConv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw))
        if global_stages < 1:
            self.conv1f = spectral_norm(PartialConv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw))
        else:
            self.conv1f = self.conv1
        self.relu1 = nn.LeakyReLU(0.2, True)
        nf_mult = 1
        nf_mult_prev = 1
        # Stage 2: ndf -> 2*ndf.
        n = 1
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n, 8)
        self.conv2 = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.norm2 = norm_layer(ndf * nf_mult)
        if global_stages < 2:
            self.conv2f = spectral_norm(
                PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
            self.norm2f = norm_layer(ndf * nf_mult)
        else:
            self.conv2f = self.conv2
            self.norm2f = self.norm2
        self.relu2 = nn.LeakyReLU(0.2, True)
        # Stage 3: 2*ndf -> 4*ndf.
        n = 2
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n, 8)
        self.conv3 = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.norm3 = norm_layer(ndf * nf_mult)
        if global_stages < 3:
            self.conv3f = spectral_norm(
                PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
            self.norm3f = norm_layer(ndf * nf_mult)
        else:
            self.conv3f = self.conv3
            self.norm3f = self.norm3
        self.relu3 = nn.LeakyReLU(0.2, True)
        # Stage 4: 4*ndf -> 8*ndf (always separate weights for the f-branch).
        n = 3
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n, 8)
        self.norm4 = norm_layer(ndf * nf_mult)
        self.conv4 = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.conv4f = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.norm4f = norm_layer(ndf * nf_mult)
        self.relu4 = nn.LeakyReLU(0.2, True)
        # Stage 5: channel count saturates at 8*ndf (min(2**n, 8)).
        n = 4
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n, 8)
        self.conv5 = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.conv5f = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.norm5 = norm_layer(ndf * nf_mult)
        self.norm5f = norm_layer(ndf * nf_mult)
        self.relu5 = nn.LeakyReLU(0.2, True)
        # Stage 6.
        n = 5
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n, 8)
        self.conv6 = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.conv6f = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias))
        self.norm6 = norm_layer(ndf * nf_mult)
        self.norm6f = norm_layer(ndf * nf_mult)
        self.relu6 = nn.LeakyReLU(0.2, True)
        # Stage 7: stride 1, no normalization (final feature maps).
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        self.conv7 = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias))
        self.conv7f = spectral_norm(
            PartialConv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias))
    def forward(self, input, mask=None):
        """Return (x, xf, xb): global features of the whole image, foreground
        features (partial convs guided by `mask`) and background features
        (guided by 1 - mask).

        NOTE(review): despite the default, `mask` must not be None here --
        `1 - mask` below would raise; confirm callers always pass a mask.
        """
        # Global branch: plain pass over the full image; the partial convs'
        # returned masks are ignored (implicit all-ones mask).
        x = input
        x, _ = self.conv1(x)
        x = self.relu1(x)
        x, _ = self.conv2(x)
        x = self.norm2(x)
        x = self.relu2(x)
        x, _ = self.conv3(x)
        x = self.norm3(x)
        x = self.relu3(x)
        x, _ = self.conv4(x)
        x = self.norm4(x)
        x = self.relu4(x)
        x, _ = self.conv5(x)
        x = self.norm5(x)
        x = self.relu5(x)
        x, _ = self.conv6(x)
        x = self.norm6(x)
        x = self.relu6(x)
        x, _ = self.conv7(x)
        """Standard forward."""
        # Foreground/background branches share the same f-weights; only the
        # mask (and hence the partial-conv renormalization) differs.
        xf, xb = input, input
        mf, mb = mask, 1 - mask
        xf, mf = self.conv1f(xf, mf)
        xf = self.relu1(xf)
        xf, mf = self.conv2f(xf, mf)
        xf = self.norm2f(xf)
        xf = self.relu2(xf)
        xf, mf = self.conv3f(xf, mf)
        xf = self.norm3f(xf)
        xf = self.relu3(xf)
        xf, mf = self.conv4f(xf, mf)
        xf = self.norm4f(xf)
        xf = self.relu4(xf)
        xf, mf = self.conv5f(xf, mf)
        xf = self.norm5f(xf)
        xf = self.relu5(xf)
        xf, mf = self.conv6f(xf, mf)
        xf = self.norm6f(xf)
        xf = self.relu6(xf)
        xf, mf = self.conv7f(xf, mf)
        xb, mb = self.conv1f(xb, mb)
        xb = self.relu1(xb)
        xb, mb = self.conv2f(xb, mb)
        xb = self.norm2f(xb)
        xb = self.relu2(xb)
        xb, mb = self.conv3f(xb, mb)
        xb = self.norm3f(xb)
        xb = self.relu3(xb)
        xb, mb = self.conv4f(xb, mb)
        xb = self.norm4f(xb)
        xb = self.relu4(xb)
        xb, mb = self.conv5f(xb, mb)
        xb = self.norm5f(xb)
        xb = self.relu5(xb)
        xb, mb = self.conv6f(xb, mb)
        xb = self.norm6f(xb)
        xb = self.relu6(xb)
        xb, mb = self.conv7f(xb, mb)
        return x, xf, xb
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator with a global branch and a foreground/background
    similarity branch built on top of OrgDiscriminator.

    The global branch scores the whole image; the similarity branch scores
    the element-wise agreement between foreground and background feature maps.
    """
    def __init__(self, input_nc, ndf=64, n_layers=6, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        num_outputs = ndf * min(2 ** n_layers, 8)
        self.D = OrgDiscriminator(input_nc, ndf, n_layers, norm_layer)
        # 1x1 convs turning the fore/background feature product into a similarity map.
        self.convl1 = spectral_norm(nn.Conv2d(num_outputs, num_outputs, kernel_size=1, stride=1))
        self.relul1 = nn.LeakyReLU(0.2)
        self.convl2 = spectral_norm(nn.Conv2d(num_outputs, num_outputs, kernel_size=1, stride=1))
        self.relul2 = nn.LeakyReLU(0.2)
        self.convl3 = nn.Conv2d(num_outputs, 1, kernel_size=1, stride=1)
        # 1x1 conv projecting the global features to a single-channel score map.
        self.convg3 = nn.Conv2d(num_outputs, 1, kernel_size=1, stride=1)
    def forward(self, input, mask=None, gp=False, feat_loss=False):
        """Score `input`.

        Returns:
            gp=False, feat_loss=False -- (global score map, similarity map)
            gp=False, feat_loss=True  -- (global score, similarity, feat_g, feat_l)
            gp=True                   -- single averaged score map (gradient penalty path)
        """
        x, xf, xb = self.D(input, mask)
        # BUGFIX: the original `feat_l, feat_g = torch.cat([xf, xb])` unpacked a
        # single batch-concatenated tensor into two names, which raises unless
        # the batch size happens to be 2. Local features are the channel-wise
        # concatenation of fore/background maps; global features are the
        # backbone output before its final 1x1 projection.
        feat_l = torch.cat([xf, xb], dim=1)
        feat_g = x
        x = self.convg3(x)
        sim = xf * xb  # element-wise fore/background agreement
        sim = self.convl1(sim)
        sim = self.relul1(sim)
        sim = self.convl2(sim)
        sim = self.relul2(sim)
        sim = self.convl3(sim)
        sim_sum = sim
        if not gp:
            if feat_loss:
                return x, sim_sum, feat_g, feat_l
            return x, sim_sum
        # Gradient-penalty path: one combined score map.
        return (x + sim_sum) * 0.5
| 35,535 | 43.812106 | 143 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/rain/models/normalize.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class RAIN(nn.Module):
    """Region-Aware Instance Normalization.

    The foreground is whitened with statistics measured inside the mask and
    re-colored with background statistics, aligning the two regions; each
    region then gets its own learned per-channel affine transform.
    """
    def __init__(self, dims_in, eps=1e-5):
        super(RAIN, self).__init__()
        # Learned per-channel affine parameters, one (gamma, beta) per region.
        self.foreground_gamma = nn.Parameter(torch.zeros(dims_in), requires_grad=True)
        self.foreground_beta = nn.Parameter(torch.zeros(dims_in), requires_grad=True)
        self.background_gamma = nn.Parameter(torch.zeros(dims_in), requires_grad=True)
        self.background_beta = nn.Parameter(torch.zeros(dims_in), requires_grad=True)
        self.eps = eps
    def forward(self, x, mask):
        """Normalize `x` region-wise; `mask` marks the foreground (1) region."""
        # Resize the mask to the feature resolution; detach so no gradient
        # flows through the resampling.
        mask = F.interpolate(mask.detach(), size=x.size()[2:], mode='nearest')
        inv_mask = 1 - mask
        # Background: instance norm with background-only statistics.
        mean_back, std_back = self.get_foreground_mean_std(x * inv_mask, inv_mask)
        back_normed = (x - mean_back) / std_back
        back_out = (back_normed * (1 + self.background_gamma[None, :, None, None]) +
                    self.background_beta[None, :, None, None]) * inv_mask
        # Foreground: whiten with foreground stats, re-color with background stats.
        mean_fore, std_fore = self.get_foreground_mean_std(x * mask, mask)
        fore_normed = (x - mean_fore) / std_fore * std_back + mean_back
        fore_out = (fore_normed * (1 + self.foreground_gamma[None, :, None, None]) +
                    self.foreground_beta[None, :, None, None]) * mask
        return fore_out + back_out
    def get_foreground_mean_std(self, region, mask):
        """Per-sample, per-channel mean/std over the pixels selected by `mask`.

        `region` must already be masked (zeros outside the region).
        """
        total = torch.sum(region, dim=[2, 3])  # (B, C)
        count = torch.sum(mask, dim=[2, 3])    # (B, 1), broadcasts over channels
        mean = (total / (count + self.eps))[:, :, None, None]
        # Adding (1 - mask) * mean before subtracting the mean zeroes the
        # contribution of out-of-region pixels to the variance.
        var = torch.sum((region + (1 - mask) * mean - mean) ** 2, dim=[2, 3]) / (count + self.eps)
        var = var[:, :, None, None]
        return mean, torch.sqrt(var + self.eps)
| 2,025 | 49.65 | 108 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/rain/util/image_pool.py | import random
import torch
class ImagePool():
    """A buffer of previously generated images.

    Lets discriminators be updated with a history of generated images rather
    than only the latest generator outputs.
    """
    def __init__(self, pool_size):
        """Create the buffer.

        Parameters:
            pool_size (int) -- capacity of the buffer; 0 disables buffering
        """
        self.pool_size = pool_size
        if self.pool_size > 0:  # create an empty pool
            self.num_imgs = 0   # number of slots currently filled
            self.images = []    # the stored images
    def query(self, images):
        """Return a batch of images drawn from the pool.

        Parameters:
            images -- the latest images produced by the generator

        Until the pool is full, every image is stored and returned unchanged.
        Afterwards each image has a 50% chance of being swapped with a random
        previously stored image (the new image takes its slot) and a 50%
        chance of being returned as-is.
        """
        if self.pool_size == 0:  # buffering disabled
            return images
        out = []
        for img in images:
            img = torch.unsqueeze(img.data, 0)
            if self.num_imgs < self.pool_size:
                # still filling the pool: store and pass through
                self.num_imgs = self.num_imgs + 1
                self.images.append(img)
                out.append(img)
            elif random.uniform(0, 1) > 0.5:
                # swap with a randomly chosen stored image
                idx = random.randint(0, self.pool_size - 1)  # randint is inclusive
                stored = self.images[idx].clone()
                self.images[idx] = img
                out.append(stored)
            else:
                # pass the current image through untouched
                out.append(img)
        return torch.cat(out, 0)  # collect all the images and return
| 2,226 | 39.490909 | 140 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/rain/util/spectral_norm.py | """
Spectral Normalization from https://arxiv.org/abs/1802.05957
"""
import torch
from torch.nn.functional import normalize
class SpectralNorm(object):
    """Forward pre-hook implementing spectral normalization (vendored from
    torch.nn.utils.spectral_norm).

    The hook keeps the unnormalized weight as ``<name>_orig`` plus the
    power-iteration vectors ``<name>_u`` / ``<name>_v`` as buffers, and
    recomputes ``<name> = <name>_orig / sigma`` before every forward call.
    """
    # Invariant before and after each forward call:
    #   u = normalize(W @ v)
    # NB: At initialization, this invariant is not enforced
    _version = 1
    # At version 1:
    #   made `W` not a buffer,
    #   added `v` as a buffer, and
    #   made eval mode use `W = u @ W_orig @ v` rather than the stored `W`.
    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
        self.name = name
        self.dim = dim
        if n_power_iterations <= 0:
            raise ValueError('Expected n_power_iterations to be positive, but '
                             'got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps
    def reshape_weight_to_matrix(self, weight):
        """Flatten `weight` into a 2-D matrix with `self.dim` leading."""
        weight_mat = weight
        if self.dim != 0:
            # permute dim to front
            weight_mat = weight_mat.permute(self.dim,
                                            *[d for d in range(weight_mat.dim()) if d != self.dim])
        height = weight_mat.size(0)
        return weight_mat.reshape(height, -1)
    def compute_weight(self, module, do_power_iteration):
        """Return `weight_orig / sigma`, optionally refining `u`/`v` in-place
        with `self.n_power_iterations` rounds of power iteration first."""
        # NB: If `do_power_iteration` is set, the `u` and `v` vectors are
        #     updated in power iteration **in-place**. This is very important
        #     because in `DataParallel` forward, the vectors (being buffers) are
        #     broadcast from the parallelized module to each module replica,
        #     which is a new module object created on the fly. And each replica
        #     runs its own spectral norm power iteration. So simply assigning
        #     the updated vectors to the module this function runs on will cause
        #     the update to be lost forever. And the next time the parallelized
        #     module is replicated, the same randomly initialized vectors are
        #     broadcast and used!
        #
        #     Therefore, to make the change propagate back, we rely on two
        #     important behaviors (also enforced via tests):
        #       1. `DataParallel` doesn't clone storage if the broadcast tensor
        #          is already on correct device; and it makes sure that the
        #          parallelized module is already on `device[0]`.
        #       2. If the out tensor in `out=` kwarg has correct shape, it will
        #          just fill in the values.
        #     Therefore, since the same power iteration is performed on all
        #     devices, simply updating the tensors in-place will make sure that
        #     the module replica on `device[0]` will update the _u vector on the
        #     parallized module (by shared storage).
        #
        #     However, after we update `u` and `v` in-place, we need to **clone**
        #     them before using them to normalize the weight. This is to support
        #     backproping through two forward passes, e.g., the common pattern in
        #     GAN training: loss = D(real) - D(fake). Otherwise, engine will
        #     complain that variables needed to do backward for the first forward
        #     (i.e., the `u` and `v` vectors) are changed in the second forward.
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        v = getattr(module, self.name + '_v')
        weight_mat = self.reshape_weight_to_matrix(weight)
        if do_power_iteration:
            with torch.no_grad():
                for _ in range(self.n_power_iterations):
                    # Spectral norm of weight equals to `u^T W v`, where `u` and `v`
                    # are the first left and right singular vectors.
                    # This power iteration produces approximations of `u` and `v`.
                    v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
                    u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
                if self.n_power_iterations > 0:
                    # See above on why we need to clone
                    u = u.clone()
                    v = v.clone()
        sigma = torch.dot(u, torch.mv(weight_mat, v))
        weight = weight / sigma
        return weight
    def remove(self, module):
        """Undo the reparameterization: restore `<name>` as a plain Parameter."""
        with torch.no_grad():
            weight = self.compute_weight(module, do_power_iteration=False)
        delattr(module, self.name)
        delattr(module, self.name + '_u')
        delattr(module, self.name + '_v')
        delattr(module, self.name + '_orig')
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
    def __call__(self, module, inputs):
        # Forward pre-hook body: refresh the normalized weight (power
        # iteration only runs in training mode).
        setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))
    def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
        # Tries to returns a vector `v` s.t. `u = normalize(W @ v)`
        # (the invariant at top of this class) and `u @ W @ v = sigma`.
        # This uses pinverse in case W^T W is not invertible.
        v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)).squeeze(1)
        return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v)))
    @staticmethod
    def apply(module, name, n_power_iterations, dim, eps):
        """Register a SpectralNorm hook (plus state-dict hooks) on `module`."""
        for k, hook in module._forward_pre_hooks.items():
            if isinstance(hook, SpectralNorm) and hook.name == name:
                raise RuntimeError("Cannot register two spectral_norm hooks on "
                                   "the same parameter {}".format(name))
        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]
        with torch.no_grad():
            weight_mat = fn.reshape_weight_to_matrix(weight)
            h, w = weight_mat.size()
            # randomly initialize `u` and `v`
            u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)
            v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter(fn.name + "_orig", weight)
        # We still need to assign weight back as fn.name because all sorts of
        # things may assume that it exists, e.g., when initializing weights.
        # However, we can't directly assign as it could be an nn.Parameter and
        # gets added as a parameter. Instead, we register weight.data as a plain
        # attribute.
        setattr(module, fn.name, weight.data)
        module.register_buffer(fn.name + "_u", u)
        module.register_buffer(fn.name + "_v", v)
        module.register_forward_pre_hook(fn)
        module._register_state_dict_hook(SpectralNormStateDictHook(fn))
        module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn))
        return fn
# This is a top level class because Py2 pickle doesn't like inner class nor an
# instancemethod.
class SpectralNormLoadStateDictPreHook(object):
    """load_state_dict pre-hook that migrates pre-version-1 spectral-norm
    state dicts (which stored `weight` itself) to the current
    `weight_orig` / `weight_u` / `weight_v` layout."""
    # See docstring of SpectralNorm._version on the changes to spectral_norm.
    def __init__(self, fn):
        self.fn = fn
    # For state_dict with version None, (assuming that it has gone through at
    # least one training forward), we have
    #
    #    u = normalize(W_orig @ v)
    #    W = W_orig / sigma, where sigma = u @ W_orig @ v
    #
    # To compute `v`, we solve `W_orig @ x = u`, and let
    #    v = x / (u @ W_orig @ x) * (W / W_orig).
    def __call__(self, state_dict, prefix, local_metadata, strict,
                 missing_keys, unexpected_keys, error_msgs):
        fn = self.fn
        version = local_metadata.get('spectral_norm', {}).get(fn.name + '.version', None)
        if version is None or version < 1:
            weight_key = prefix + fn.name
            if version is None and all(weight_key + s in state_dict for s in ('_orig', '_u', '_v')) and \
                    weight_key not in state_dict:
                # Detect if it is the updated state dict and just missing metadata.
                # This could happen if the users are crafting a state dict themselves,
                # so we just pretend that this is the newest.
                return
            has_missing_keys = False
            for suffix in ('_orig', '', '_u'):
                key = weight_key + suffix
                if key not in state_dict:
                    has_missing_keys = True
                    if strict:
                        missing_keys.append(key)
            if has_missing_keys:
                return
            # Reconstruct `v` from the legacy entries (see comment above).
            with torch.no_grad():
                weight_orig = state_dict[weight_key + '_orig']
                weight = state_dict.pop(weight_key)
                sigma = (weight_orig / weight).mean()
                weight_mat = fn.reshape_weight_to_matrix(weight_orig)
                u = state_dict[weight_key + '_u']
                v = fn._solve_v_and_rescale(weight_mat, u, sigma)
                state_dict[weight_key + '_v'] = v
# This is a top level class because Py2 pickle doesn't like inner class nor an
# instancemethod.
class SpectralNormStateDictHook(object):
    """state_dict hook that records the spectral-norm serialization version
    in the module's local metadata so loading can migrate old checkpoints."""
    # See docstring of SpectralNorm._version on the changes to spectral_norm.
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, module, state_dict, prefix, local_metadata):
        if 'spectral_norm' not in local_metadata:
            local_metadata['spectral_norm'] = {}
        key = self.fn.name + '.version'
        if key in local_metadata['spectral_norm']:
            raise RuntimeError("Unexpected key in metadata['spectral_norm']: {}".format(key))
        local_metadata['spectral_norm'][key] = self.fn._version
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):
    r"""Applies spectral normalization to a parameter in the given module.

    .. math::
        \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}

    The weight tensor is rescaled by its spectral norm :math:`\sigma`
    (largest singular value), estimated via the power-iteration method on a
    2-D reshape of the weight. A forward pre-hook recomputes the rescaled
    weight before every :meth:`~Module.forward` call. This stabilizes the
    training of discriminators (critics) in GANs.

    See `Spectral Normalization for Generative Adversarial Networks`
    (https://arxiv.org/abs/1802.05957).

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        n_power_iterations (int, optional): number of power iterations used
            to estimate the spectral norm
        eps (float, optional): epsilon for numerical stability in
            calculating norms
        dim (int, optional): dimension corresponding to number of outputs;
            defaults to ``0``, or ``1`` for ConvTranspose{1,2,3}d modules

    Returns:
        The original module with the spectral norm hook

    Example::
        >>> m = spectral_norm(nn.Linear(20, 40))
        >>> m
        Linear(in_features=20, out_features=40, bias=True)
        >>> m.weight_u.size()
        torch.Size([40])
    """
    if dim is None:
        # Transposed convolutions keep output channels in dimension 1.
        transposed_types = (torch.nn.ConvTranspose1d,
                            torch.nn.ConvTranspose2d,
                            torch.nn.ConvTranspose3d)
        dim = 1 if isinstance(module, transposed_types) else 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module
def remove_spectral_norm(module, name='weight'):
    r"""Removes the spectral normalization reparameterization from a module.

    Args:
        module (Module): containing module
        name (str, optional): name of weight parameter

    Raises:
        ValueError: if no spectral_norm hook named ``name`` is registered.

    Example:
        >>> m = spectral_norm(nn.Linear(40, 10))
        >>> remove_spectral_norm(m)
    """
    # Find and detach the forward pre-hook; error out if it does not exist.
    for key, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[key]
            break
    else:
        raise ValueError("spectral_norm of '{}' not found in {}".format(
            name, module))
    # Drop the companion state-dict save/load hooks as well.
    for key, hook in module._state_dict_hooks.items():
        if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name:
            del module._state_dict_hooks[key]
            break
    for key, hook in module._load_state_dict_pre_hooks.items():
        if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name:
            del module._load_state_dict_pre_hooks[key]
            break
    return module
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/base/rain/util/util.py | """This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def tensor2im(input_image, imtype=np.uint8):
    """Convert a Tensor array into a numpy image array.

    Parameters:
        input_image (tensor) -- the input image tensor array
        imtype (type)        -- the desired type of the converted numpy array

    Tensors are assumed to hold values in [-1, 1] (CHW; only the first item
    of the batch is converted); they are mapped to [0, 255] HWC arrays.
    Numpy inputs are only cast to `imtype`; anything else is returned as-is.
    """
    if isinstance(input_image, np.ndarray):
        image_numpy = input_image
    elif isinstance(input_image, torch.Tensor):
        image_numpy = input_image.data[0].cpu().float().numpy()
        if image_numpy.shape[0] == 1:  # grayscale to RGB
            image_numpy = np.tile(image_numpy, (3, 1, 1))
        # CHW -> HWC, then [-1, 1] -> [0, 255]
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    else:
        return input_image
    return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Calculate and print the mean of average absolute(gradients)

    Parameters:
        net (torch network) -- Torch network
        name (str)          -- the name of the network
    """
    total = 0.0
    num_grads = 0
    for param in net.parameters():
        if param.grad is not None:
            total += torch.mean(torch.abs(param.grad.data))
            num_grads += 1
    if num_grads > 0:
        total = total / num_grads
    print(name)
    print(total)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- stretch width (>1.0) or height (<1.0) before saving
    """
    h, w, _ = image_numpy.shape
    image_pil = Image.fromarray(image_numpy)
    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    elif aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path, quality=100)  # quality=100 added by Mia
def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array

    Parameters:
        val (bool) -- if print the values of the numpy array
        shp (bool) -- if print the shape of the numpy array
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths
    """
    # A single path (anything that is not a non-string list) is handled directly.
    if isinstance(paths, str) or not isinstance(paths, list):
        mkdir(paths)
    else:
        for path in paths:
            mkdir(path)
def mkdir(path):
    """create a single empty directory if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    # Deliberately checks existence first instead of using exist_ok=True,
    # so an existing path (even a regular file) is silently left alone.
    if os.path.exists(path):
        return
    os.makedirs(path)
def copy_state_dict(cur_state_dict, pre_state_dict, prefix='', load_name=None):
    """Copy matching parameters from `pre_state_dict` into `cur_state_dict` in place.

    Best-effort: keys missing from the source, and tensors whose shapes are
    incompatible with the destination, are skipped silently.

    Parameters:
        cur_state_dict (dict) -- destination state dict (its tensors are modified in place)
        pre_state_dict (dict) -- source state dict
        prefix (str)          -- prefix prepended to keys when looking them up in the source
        load_name (str)       -- if given, only keys containing this substring are copied
    """
    for key in cur_state_dict:
        if load_name is not None and load_name not in key:
            continue
        value = pre_state_dict.get(prefix + key)
        if value is None:
            # parameter not found in the source; skip
            continue
        try:
            cur_state_dict[key].copy_(value)
        except Exception:
            # Shape/type mismatch: keep the current value. Narrowed from a
            # bare `except:` so KeyboardInterrupt/SystemExit still propagate.
            continue
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/basic_blocks.py | import math
import numbers
import torch
import torch.nn.functional as F
from torch import nn as nn
from torch.nn.utils import spectral_norm
class ConvBlock(nn.Module):
def __init__(
self,
in_channels, out_channels,
kernel_size=4, stride=2, padding=1,
norm_layer=nn.BatchNorm2d, activation=nn.ELU,
bias=True,
):
super(ConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias),
norm_layer(out_channels) if norm_layer is not None else nn.Identity(),
activation(),
)
# self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
# self.norm = norm_layer(out_channels) if norm_layer is not None else nn.Identity()
# self.act = activation()
def forward(self, x):
# x = self.conv(x)
# x = self.norm(x)
# x = self.act(x)
return self.block(x)
class DeconvBlock(nn.Module):
def __init__(self,
in_channels, out_channels,
kernel_size=4, stride=2, padding=1,
norm_layer=nn.BatchNorm2d, activation=nn.PReLU,
bias=True,
):
super(DeconvBlock, self).__init__()
self.block = nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias),
norm_layer(out_channels) if norm_layer is not None else nn.Identity(),
activation(),
)
# self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
# self.norm = norm_layer(out_channels) if norm_layer is not None else nn.Identity()
# self.act = activation()
def forward(self, x):
# x = self.deconv(x)
# x = self.norm(x)
# x = self.act(x)
return self.block(x)
class ResBlock(nn.Module):
def __init__(self, dim, norm_layer, kernel_size=3, padding=1, activation=nn.ReLU(True), use_dropout=False):
super(ResBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, kernel_size, padding, norm_layer, activation, use_dropout)
def build_conv_block(self, dim, kernel_size, padding, norm_layer, activation, use_dropout):
if isinstance(padding, tuple):
padding = (padding[1], padding[1], padding[0], padding[0])
conv_block = []
conv_block += [
# norm_layer(dim) if norm_layer is not None else nn.Identity(),
# activation,
nn.ReplicationPad2d(padding),
nn.Conv2d(dim, dim, kernel_size=kernel_size),
norm_layer(dim) if norm_layer is not None else nn.Identity(),
activation,
]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
conv_block += [
# norm_layer(dim) if norm_layer is not None else nn.Identity(),
# activation,
nn.ReplicationPad2d(padding),
nn.Conv2d(dim, dim, kernel_size=kernel_size),
norm_layer(dim) if norm_layer is not None else nn.Identity(),
activation,
]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class SepConvHead(nn.Module):
def __init__(self, num_outputs, in_channels, mid_channels, num_layers=1,
kernel_size=3, padding=1, dropout_ratio=0.0, dropout_indx=0,
norm_layer=nn.BatchNorm2d):
super(SepConvHead, self).__init__()
sepconvhead = []
for i in range(num_layers):
sepconvhead.append(
SeparableConv2d(in_channels=in_channels if i == 0 else mid_channels,
out_channels=mid_channels,
dw_kernel=kernel_size, dw_padding=padding,
norm_layer=norm_layer, activation='relu')
)
if dropout_ratio > 0 and dropout_indx == i:
sepconvhead.append(nn.Dropout(dropout_ratio))
sepconvhead.append(
nn.Conv2d(in_channels=mid_channels, out_channels=num_outputs, kernel_size=1, padding=0)
)
self.layers = nn.Sequential(*sepconvhead)
def forward(self, *inputs):
x = inputs[0]
return self.layers(x)
def select_activation_function(activation):
if isinstance(activation, str):
if activation.lower() == 'relu':
return nn.ReLU
elif activation.lower() == 'softplus':
return nn.Softplus
else:
raise ValueError(f"Unknown activation type {activation}")
elif isinstance(activation, nn.Module):
return activation
else:
raise ValueError(f"Unknown activation type {activation}")
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a depthwise conv (one filter per input
    channel) followed by a 1x1 pointwise conv, optional normalization, and an
    activation resolved through select_activation_function.
    """
    def __init__(self, in_channels, out_channels, dw_kernel, dw_padding, dw_stride=1,
                 activation=None, use_bias=False, norm_layer=None):
        super(SeparableConv2d, self).__init__()
        act_cls = select_activation_function(activation)
        stages = [
            # Depthwise stage: groups == in_channels keeps channels separate.
            nn.Conv2d(in_channels, in_channels, kernel_size=dw_kernel, stride=dw_stride,
                      padding=dw_padding, bias=use_bias, groups=in_channels),
            # Pointwise stage: 1x1 conv mixes channels.
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=use_bias),
        ]
        stages.append(norm_layer(out_channels) if norm_layer is not None else nn.Identity())
        stages.append(act_cls())
        self.body = nn.Sequential(*stages)
    def forward(self, x):
        return self.body(x)
class GaussianSmoothing(nn.Module):
    """
    https://discuss.pytorch.org/t/is-there-anyway-to-do-gaussian-filtering-for-an-image-2d-3d-in-pytorch/12351/10
    Apply Gaussian smoothing to a 1d, 2d or 3d tensor.
    Each channel is filtered independently using a depthwise (grouped)
    convolution with a fixed, non-trainable Gaussian kernel.
    Arguments:
        channels (int, sequence): Number of channels of the input tensors.
            Output will have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        padding (int, optional): Padding forwarded to the convolution. Default 0.
        dim (int, optional): The number of dimensions of the data. Default value is 2 (spatial).
    """
    def __init__(self, channels, kernel_size, sigma, padding=0, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Broadcast scalar arguments to one value per spatial dimension.
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # The N-d Gaussian is separable: multiply 1-d Gaussians along each axis.
        coords = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        kernel = 1.
        for size, std, grid in zip(kernel_size, sigma, coords):
            center = (size - 1) / 2.
            kernel = kernel * torch.exp(-((grid - center) / std) ** 2 / 2) / (std * (2 * math.pi) ** 0.5)
        # Normalize so the kernel sums to one (preserves mean intensity).
        kernel = kernel / kernel.sum()
        # Reshape to a depthwise convolution weight: (channels, 1, *kernel_size).
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = torch.repeat_interleave(kernel, channels, 0)
        self.register_buffer('weight', kernel)
        self.groups = channels
        self.padding = padding
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))
    def forward(self, input):
        """
        Apply gaussian filter to input.
        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        return self.conv(input, weight=self.weight, padding=self.padding, groups=self.groups)
class MaxPoolDownSize(nn.Module):
    """Build a pyramid of `depth` feature maps: a 1x1 channel reduction,
    then a 3x3 conv at each scale, halving resolution (max-pool) between
    consecutive scales."""
    def __init__(self, in_channels, mid_channels, out_channels, depth):
        super(MaxPoolDownSize, self).__init__()
        self.depth = depth
        self.reduce_conv = ConvBlock(in_channels, mid_channels, kernel_size=1, stride=1, padding=0)
        self.convs = nn.ModuleList(
            ConvBlock(mid_channels, out_channels, kernel_size=3, stride=1, padding=1)
            for _ in range(depth)
        )
        self.pool2d = nn.MaxPool2d(kernel_size=2)
    def forward(self, x):
        feature = self.reduce_conv(x)
        pyramid = []
        for level, conv in enumerate(self.convs):
            # Downsample before every level except the first.
            if level > 0:
                feature = self.pool2d(feature)
            pyramid.append(conv(feature))
        return pyramid
class UpPBlock(nn.Module):
    """Pixel-shuffle upsampling block: pad -> conv producing
    out_dim * scale_factor**2 channels -> PixelShuffle(scale_factor), with
    optional normalization and activation applied either before the conv
    (activation_first=True) or after the whole block.

    BUGFIX: the conv + PixelShuffle pair was previously stored in an
    nn.ModuleList, which is not callable, so forward() crashed for every
    norm != 'sn'.  It is now an nn.Sequential in both cases.
    """
    def __init__(self, in_dim, out_dim, ks=3, st=1, padding=1, scale_factor=2, norm='none', activation='relu',
                 pad_type='zero', use_bias=True, activation_first=False):
        super(UpPBlock, self).__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            # raise instead of `assert 0`: asserts are stripped under -O
            raise ValueError("Unsupported padding type: {}".format(pad_type))
        # initialize normalization
        norm_dim = out_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'sn':
            # spectral norm wraps the conv below instead of adding a layer
            self.norm = nn.Identity()
        elif norm == 'none':
            self.norm = None
        else:
            raise ValueError("Unsupported normalization: {}".format(norm))
        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'elu':
            self.activation = nn.ELU()
        elif activation == 'none':
            self.activation = None
        else:
            raise ValueError("Unsupported activation: {}".format(activation))
        conv = nn.Conv2d(in_dim, out_dim * (scale_factor ** 2), ks, st, bias=self.use_bias)
        if norm == 'sn':
            conv = spectral_norm(conv)
        self.conv = nn.Sequential(conv, nn.PixelShuffle(scale_factor))
    def forward(self, x):
        if self.activation_first:
            if self.norm:
                x = self.norm(x)
            if self.activation:
                x = self.activation(x)
            x = self.conv(self.pad(x))
        else:
            x = self.conv(self.pad(x))
            if self.norm:
                x = self.norm(x)
            if self.activation:
                x = self.activation(x)
        return x
class DBUpsample(nn.Module):
    """Back-projection upsampling (DBPN-style): upsample, project back down,
    and add an upsampled residual correction.
    NOTE(review): the residual `l0 - x` requires the down-projected map and
    the input to have the same channel count — presumably callers use
    in_channel == out_channel; confirm.
    """
    def __init__(self, in_channel, out_channel, ks=4, st=2, padding=1, bias=True, activation='relu', norm='bn',
                 activation_first=False):
        super(DBUpsample, self).__init__()
        padding = (ks - 1) // 2  # derived from the kernel size; the argument is overridden
        ngf = out_channel
        self.up_conv1 = UpPBlock(in_channel, ngf, scale_factor=2, norm=norm, use_bias=bias, activation=activation,
                                 activation_first=activation_first)
        self.down_conv1 = Conv2dBlock(ngf, ngf, ks, st, padding, norm=norm, use_bias=bias, activation=activation,
                                      activation_first=activation_first)
        self.up_conv2 = UpPBlock(ngf, ngf, scale_factor=2, norm=norm, use_bias=bias, activation=activation,
                                 activation_first=activation_first)
    def forward(self, x):
        upsampled = self.up_conv1(x)
        back_projected = self.down_conv1(upsampled)
        residual_up = self.up_conv2(back_projected - x)
        return upsampled + residual_up
class DBDownsample(nn.Module):
    """Back-projection downsampling (DBPN-style): downsample, project back up,
    and add a downsampled residual correction.  An optional 1x1 input conv
    aligns channel counts when in_channel != out_channel.

    BUGFIX: forward() previously applied `down_conv1` twice and never used
    `down_conv2`; the residual correction must go through the second down
    conv (cf. DownBlock.forward in this file, which uses its third conv for
    the same step).
    """
    def __init__(self, in_channel, out_channel, ks=4, st=2, padding=1, bias=True, activation='relu', norm='bn',
                 activation_first=False):
        super(DBDownsample, self).__init__()
        padding = (ks - 1) // 2  # derived from the kernel size; the argument is overridden
        ngf = out_channel
        if in_channel != out_channel:
            # 1x1 projection so the residual `h0 - x` is channel-compatible.
            self.in_conv = Conv2dBlock(in_channel, out_channel, 1, 1, 0, norm='none', activation=activation,
                                       use_bias=bias, activation_first=activation_first)
        else:
            self.in_conv = None
        self.down_conv1 = Conv2dBlock(ngf, ngf, ks, st, padding, norm=norm, activation=activation, use_bias=bias,
                                      activation_first=activation_first)
        self.up_conv1 = UpPBlock(ngf, ngf, scale_factor=2, norm=norm, use_bias=bias, activation=activation,
                                 activation_first=activation_first)
        self.down_conv2 = Conv2dBlock(ngf, ngf, ks, st, padding, norm=norm, activation=activation, use_bias=bias,
                                      activation_first=activation_first)
    def forward(self, x):
        if self.in_conv is not None:
            x = self.in_conv(x)
        l0 = self.down_conv1(x)
        h0 = self.up_conv1(l0)
        # Residual correction through the second down conv (was down_conv1).
        l1 = self.down_conv2(h0 - x)
        return l0 + l1
class Conv2dBlock(nn.Module):
    """Conv2d preceded by an explicit padding layer, with optional
    normalization and activation applied before the conv
    (activation_first=True) or after it (default)."""
    def __init__(self, in_dim, out_dim, ks, st, padding=0,
                 norm='none', activation='relu', pad_type='zero',
                 use_bias=True, activation_first=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        self.pad = self._make_pad(pad_type, padding)
        self.norm = self._make_norm(norm, out_dim)
        self.activation = self._make_activation(activation)
        self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
        if norm == 'sn':
            # Spectral normalization wraps the conv rather than adding a layer.
            self.conv = spectral_norm(self.conv)
    @staticmethod
    def _make_pad(pad_type, padding):
        # Padding layer factory.
        if pad_type == 'reflect':
            return nn.ReflectionPad2d(padding)
        if pad_type == 'replicate':
            return nn.ReplicationPad2d(padding)
        if pad_type == 'zero':
            return nn.ZeroPad2d(padding)
        assert 0, "Unsupported padding type: {}".format(pad_type)
    @staticmethod
    def _make_norm(norm, norm_dim):
        # Normalization layer factory ('sn' is handled by wrapping the conv).
        if norm == 'bn':
            return nn.BatchNorm2d(norm_dim)
        if norm == 'in':
            return nn.InstanceNorm2d(norm_dim)
        if norm == 'adain':
            return AdaptiveInstanceNorm2d(norm_dim)
        if norm == 'sn':
            return nn.Identity()
        if norm == 'none':
            return None
        assert 0, "Unsupported normalization: {}".format(norm)
    @staticmethod
    def _make_activation(activation):
        # Activation layer factory.
        if activation == 'relu':
            return nn.ReLU(inplace=True)
        if activation == 'lrelu':
            return nn.LeakyReLU(0.2, inplace=True)
        if activation == 'tanh':
            return nn.Tanh()
        if activation == 'elu':
            return nn.ELU()
        if activation == 'none':
            return None
        assert 0, "Unsupported activation: {}".format(activation)
    def forward(self, x):
        if self.activation_first:
            if self.norm:
                x = self.norm(x)
            if self.activation:
                x = self.activation(x)
            return self.conv(self.pad(x))
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x
class UpBlock(nn.Module):
    """DBPN up-projection: deconv up, conv back down, deconv the residual.
    `bias` and `norm` are accepted for signature compatibility but unused
    (norm_layer is fixed to None)."""
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation=nn.PReLU, norm=None):
        super(UpBlock, self).__init__()
        self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, norm_layer=None,
                                    activation=activation)
        self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, norm_layer=None,
                                  activation=activation)
        self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, norm_layer=None,
                                    activation=activation)
    def forward(self, x):
        up_feat = self.up_conv1(x)
        back_proj = self.up_conv2(up_feat)
        correction = self.up_conv3(back_proj - x)
        return correction + up_feat
class DownBlock(nn.Module):
    """DBPN down-projection: conv down, deconv back up, conv the residual.
    `bias` and `norm` are accepted for signature compatibility but unused
    (norm_layer is fixed to None)."""
    def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias=True, activation=nn.PReLU, norm=None):
        super(DownBlock, self).__init__()
        self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, norm_layer=None,
                                    activation=activation)
        self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, norm_layer=None,
                                      activation=activation)
        self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, norm_layer=None,
                                    activation=activation)
    def forward(self, x):
        down_feat = self.down_conv1(x)
        back_proj = self.down_conv2(down_feat)
        correction = self.down_conv3(back_proj - x)
        return correction + down_feat
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/deeplab_v3.py | from contextlib import ExitStack
import torch
from torch import nn
import torch.nn.functional as F
from iharm.model.modeling.basic_blocks import select_activation_function
from .basic_blocks import SeparableConv2d
from .resnet import ResNetBackbone
class DeepLabV3Plus(nn.Module):
    """DeepLabV3+ segmentation encoder-decoder.

    A ResNet backbone feeds an ASPP module; its output is upsampled and
    concatenated with a projected low-level (layer1) skip feature, then
    refined by a separable-conv head.  `ch` is the main feature width.
    """
    def __init__(self, backbone='resnet50', norm_layer=nn.BatchNorm2d,
                 backbone_norm_layer=None,
                 ch=256,
                 project_dropout=0.5,
                 inference_mode=False,
                 **kwargs):
        super(DeepLabV3Plus, self).__init__()
        if backbone_norm_layer is None:
            backbone_norm_layer = norm_layer
        self.backbone_name = backbone
        self.norm_layer = norm_layer
        self.backbone_norm_layer = backbone_norm_layer
        # Starts False; set_prediction_mode() below flips it when requested.
        self.inference_mode = False
        self.ch = ch
        # Defaults for bottleneck ResNets (50/101/152); overridden for resnet34.
        self.aspp_in_channels = 2048
        self.skip_project_in_channels = 256  # layer 1 out_channels
        self._kwargs = kwargs
        if backbone == 'resnet34':
            self.aspp_in_channels = 512
            self.skip_project_in_channels = 64
        self.backbone = ResNetBackbone(backbone=self.backbone_name, pretrained_base=False,
                                       norm_layer=self.backbone_norm_layer, **kwargs)
        # Head input is ASPP output (ch) concatenated with the 32-ch skip projection.
        self.head = _DeepLabHead(in_channels=ch + 32, mid_channels=ch, out_channels=ch,
                                 norm_layer=self.norm_layer)
        self.skip_project = _SkipProject(self.skip_project_in_channels, 32, norm_layer=self.norm_layer)
        self.aspp = _ASPP(in_channels=self.aspp_in_channels,
                          atrous_rates=[12, 24, 36],
                          out_channels=ch,
                          project_dropout=project_dropout,
                          norm_layer=self.norm_layer)
        if inference_mode:
            self.set_prediction_mode()
    def load_pretrained_weights(self):
        """Copy ImageNet-pretrained weights into the backbone; freezes the
        backbone when the model is already in inference mode."""
        pretrained = ResNetBackbone(backbone=self.backbone_name, pretrained_base=True,
                                    norm_layer=self.backbone_norm_layer, **self._kwargs)
        backbone_state_dict = self.backbone.state_dict()
        pretrained_state_dict = pretrained.state_dict()
        # Merge so keys missing from the pretrained dict keep current values.
        backbone_state_dict.update(pretrained_state_dict)
        self.backbone.load_state_dict(backbone_state_dict)
        if self.inference_mode:
            for param in self.backbone.parameters():
                param.requires_grad = False
    def set_prediction_mode(self):
        # Inference mode: eval() plus no-grad execution in forward().
        self.inference_mode = True
        self.eval()
    def forward(self, x, mask_features=None):
        """Run the encoder-decoder; returns a 1-tuple with the head output.
        `mask_features` (optional) is added inside the backbone stem."""
        with ExitStack() as stack:
            if self.inference_mode:
                stack.enter_context(torch.no_grad())
            c1, _, c3, c4 = self.backbone(x, mask_features)
            c1 = self.skip_project(c1)
            x = self.aspp(c4)
            x = F.interpolate(x, c1.size()[2:], mode='bilinear', align_corners=True)
            x = torch.cat((x, c1), dim=1)
            x = self.head(x)
            return x,
class _SkipProject(nn.Module):
    """Project the low-level skip feature map: 1x1 conv -> norm -> ReLU."""
    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d):
        super(_SkipProject, self).__init__()
        relu_cls = select_activation_function("relu")
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            norm_layer(out_channels),
            relu_cls(),
        ]
        self.skip_project = nn.Sequential(*layers)
    def forward(self, x):
        return self.skip_project(x)
class _DeepLabHead(nn.Module):
    """Decoder head: two 3x3 separable convolutions followed by a 1x1
    projection to `out_channels`."""
    def __init__(self, out_channels, in_channels, mid_channels=256, norm_layer=nn.BatchNorm2d):
        super(_DeepLabHead, self).__init__()
        layers = [
            SeparableConv2d(in_channels=in_channels, out_channels=mid_channels, dw_kernel=3,
                            dw_padding=1, activation='relu', norm_layer=norm_layer),
            SeparableConv2d(in_channels=mid_channels, out_channels=mid_channels, dw_kernel=3,
                            dw_padding=1, activation='relu', norm_layer=norm_layer),
            nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1),
        ]
        self.block = nn.Sequential(*layers)
    def forward(self, x):
        return self.block(x)
class _ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: a 1x1 conv, three atrous 3x3 convs
    (one per rate in `atrous_rates`, exactly three expected) and a global
    pooling branch, concatenated and projected back to `out_channels`."""
    def __init__(self, in_channels, atrous_rates, out_channels=256,
                 project_dropout=0.5, norm_layer=nn.BatchNorm2d):
        super(_ASPP, self).__init__()
        conv1x1 = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=False),
            norm_layer(out_channels),
            nn.ReLU()
        )
        rate1, rate2, rate3 = tuple(atrous_rates)
        self.concurent = nn.ModuleList([
            conv1x1,
            _ASPPConv(in_channels, out_channels, rate1, norm_layer),
            _ASPPConv(in_channels, out_channels, rate2, norm_layer),
            _ASPPConv(in_channels, out_channels, rate3, norm_layer),
            _AsppPooling(in_channels, out_channels, norm_layer=norm_layer),
        ])
        # Fuse the five concatenated branches back down to out_channels.
        projection = [
            nn.Conv2d(in_channels=5*out_channels, out_channels=out_channels,
                      kernel_size=1, bias=False),
            norm_layer(out_channels),
            nn.ReLU()
        ]
        if project_dropout > 0:
            projection.append(nn.Dropout(project_dropout))
        self.project = nn.Sequential(*projection)
    def forward(self, x):
        branch_outputs = [branch(x) for branch in self.concurent]
        return self.project(torch.cat(branch_outputs, dim=1))
class _AsppPooling(nn.Module):
def __init__(self, in_channels, out_channels, norm_layer):
super(_AsppPooling, self).__init__()
self.gap = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=1, bias=False),
norm_layer(out_channels),
nn.ReLU()
)
def forward(self, x):
pool = self.gap(x)
return F.interpolate(pool, x.size()[2:], mode='bilinear', align_corners=True)
def _ASPPConv(in_channels, out_channels, atrous_rate, norm_layer):
block = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=3, padding=atrous_rate,
dilation=atrous_rate, bias=False),
norm_layer(out_channels),
nn.ReLU()
)
return block
| 6,392 | 35.118644 | 103 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/resnet.py | import torch
from .resnetv1b import resnet34_v1b, resnet50_v1s, resnet101_v1s, resnet152_v1s
class ResNetBackbone(torch.nn.Module):
    """Wrap a ResNet-v1b/v1s trunk and expose its four stage outputs.
    Optionally adds `mask_features` to the stem activations before pooling."""
    def __init__(self, backbone='resnet50', pretrained_base=True, dilated=True, **kwargs):
        super(ResNetBackbone, self).__init__()
        if backbone == 'resnet34':
            pretrained = resnet34_v1b(pretrained=pretrained_base, dilated=dilated, **kwargs)
        elif backbone == 'resnet50':
            pretrained = resnet50_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs)
        elif backbone == 'resnet101':
            pretrained = resnet101_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs)
        elif backbone == 'resnet152':
            pretrained = resnet152_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs)
        else:
            raise RuntimeError(f'unknown backbone: {backbone}')
        # Re-expose the stem and the four residual stages as our own submodules.
        for layer_name in ('conv1', 'bn1', 'relu', 'maxpool',
                           'layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, layer_name, getattr(pretrained, layer_name))
    def forward(self, x, mask_features=None):
        """Return the four stage feature maps (c1, c2, c3, c4)."""
        x = self.relu(self.bn1(self.conv1(x)))
        if mask_features is not None:
            x = x + mask_features
        x = self.maxpool(x)
        c1 = self.layer1(x)
        c2 = self.layer2(c1)
        c3 = self.layer3(c2)
        c4 = self.layer4(c3)
        return c1, c2, c3, c4
| 1,552 | 35.97619 | 93 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/lut.py | import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import time
import numpy as np
import math
import trilinear
import cv2
import random
import tridistribute
class TridistributeGeneraotrFunction(torch.autograd.Function):
    """Autograd wrapper around the `tridistribute` CUDA extension.

    forward() scatters (input, output) color pairs selected by `mask` into a
    per-image 33x33x33 lookup table (LUT), then normalizes every LUT bin by
    its hit count.  backward() routes the LUT gradient back through the
    extension to `input`/`output` gradients.

    NOTE(review): forward() returns three tensors but backward() declares a
    single `lut_grad` parameter — autograd passes one gradient per output,
    so this appears to rely on the extra outputs never requiring grad;
    confirm against callers.
    """
    @staticmethod
    def forward(ctx, mask, input, output):
        # Fixed LUT resolution: 33 bins per RGB axis.
        dim = 33
        #count to zero or one ?
        #depend on the initialization of lut
        #lut_count = torch.ones(lut.size())
        t1 = time.time()
        #print(input.device)
        batch = input.size(0)
        # NOTE(review): parses the CUDA device index from the last character
        # of str(device); breaks for indices >= 10 and for CPU tensors.
        torch.cuda.set_device(int((str(input.device))[-1]))
        # One LUT (values) and one hit-count tensor per image and RGB channel.
        size = torch.Size((batch, 3, dim, dim, dim))
        lut = torch.zeros(size, device=input.device)
        lut_count = torch.zeros(size, device=input.device)
        # The extension indexes raw memory, so everything must be contiguous.
        input = input.contiguous()
        output = output.contiguous()
        lut = lut.contiguous()
        mask = mask.contiguous()
        lut_count = lut_count.contiguous()
        #dim = lut.size()[-1]
        # Slightly more than 1/(dim-1) — presumably so a value of exactly 1.0
        # still falls inside the last bin; confirm against the CUDA kernel.
        binsize = 1.000001 / (dim - 1)
        W = input.size()[-1]  # image width
        H = input.size()[-2]  # image height
        shift = dim ** 3  # elements per (image, channel) LUT slab
        t2 = time.time()
        #print(input[index:index+1, :, :, :], output[index:index+1, :, :, :])
        # Scatter masked pixel pairs into lut / lut_count (extension returns 1
        # on success).
        assert 1 == tridistribute.forward(
            mask,
            lut,
            lut_count,
            input,
            output,
            dim,
            shift,
            binsize,
            W,
            H,
            batch
        )
        #lut_count[:, 1, :, :, :] = lut_count[:, 2, :, :, :] = lut_count[:, 0, :, :, :]
        #print("in lut")
        #print(lut.sum(), lut_count.sum())
        # Normalize accumulated bin values by their hit counts in place.
        assert 1 == tridistribute.divide(
            lut,
            lut_count,
            dim,
            shift,
            batch
        )
        #print(lut.sum(), lut_count.sum())
        #print("in lut")
        # Pack scalar metadata into tensors so save_for_backward accepts them.
        int_package = torch.IntTensor([dim, shift, W, H, batch])
        float_package = torch.FloatTensor([binsize])
        variables = [mask, lut_count, input, int_package, float_package]
        ctx.save_for_backward(*variables)
        #print(lut_count)
        t2 = time.time()
        #print("::",t2 - t1)
        return lut, lut_count, output
    @staticmethod
    def divide(lut, lut_count):
        """Normalize an externally accumulated LUT by its hit counts (in place)."""
        #print("divide here")
        dim = lut.size()[-1]
        shift = dim ** 3
        batch = lut.size()[0]
        assert 1 == tridistribute.divide(
            lut,
            lut_count,
            dim,
            shift,
            batch
        )
        return lut, lut_count
    @staticmethod
    def backward(ctx, lut_grad):
        # Recover tensors and unpack the scalar metadata saved in forward().
        mask, lut_count, input, int_package, float_package = ctx.saved_variables
        dim, shift, W, H, batch = int_package
        dim, shift, W, H, batch = int(dim), int(shift), int(W), int(H), int(batch)
        binsize = float(float_package[0])
        # Gradient w.r.t. `output`, filled by the extension.
        output_grad = lut_grad.new(input.size())
        assert 1 == tridistribute.backward(
            mask,
            input,
            output_grad,
            lut_count,
            lut_grad,
            dim,
            shift,
            binsize,
            W,
            H,
            batch
        )
        # Gradients for (mask, input, output): mask gets none.
        return None, output_grad, lut_grad
class TridistributeGeneraotr(torch.nn.Module):
    """nn.Module facade over TridistributeGeneraotrFunction.
    (The 'Generaotr' spelling is kept because callers import this name.)"""
    def __init__(self):
        super(TridistributeGeneraotr, self).__init__()
    def forward(self, mask, input, output):
        """Build normalized per-image LUTs from (input, output) pairs under mask."""
        return TridistributeGeneraotrFunction.apply(mask, input, output)
    def divide(self, lut, lut_count):
        """Normalize accumulated LUT bins by their hit counts."""
        return TridistributeGeneraotrFunction.divide(lut, lut_count)
class TrilinearInterpolationFunction(torch.autograd.Function):
    """Autograd wrapper around the `trilinear` CUDA extension: applies a 3D
    LUT to an image via trilinear interpolation.

    NOTE(review): `fix_threshold` / `k_threshold` are accepted but never
    forwarded to the extension (their uses are commented out below), and
    forward() takes five inputs while backward() returns only two gradients;
    confirm that the extra inputs never require grad.
    """
    @staticmethod
    def forward(ctx, lut_count, lut, x, fix_threshold = 0.01, k_threshold = 1):
        # The extension indexes raw memory, so the input must be contiguous.
        x = x.contiguous()
        # Output image buffer, same size/dtype/device as x (uninitialized;
        # filled by the extension).
        output = x.new(x.size())
        #output_eff = x.new(torch.Size([x.size()[0],x.size()[2], x.size()[3]]))
        dim = lut.size()[-1]  # LUT bins per axis
        shift = dim ** 3  # elements per (image, channel) LUT slab
        # Slightly more than 1/(dim-1) — presumably keeps 1.0 inside the last
        # bin; confirm against the CUDA kernel.
        binsize = 1.000001 / (dim-1)
        W = x.size(2)
        H = x.size(3)
        batch = x.size(0)
        #fix_threshold = 0.01
        #k_threshold = 1
        # Interpolate lut at every pixel of x into `output` (returns 1 on success).
        assert 1 == trilinear.forward(lut_count,
                                      lut,
                                      x,
                                      output,
                                      dim,
                                      shift,
                                      binsize,
                                      W,
                                      H,
                                      batch)
        # Pack scalar metadata into tensors so save_for_backward accepts them.
        int_package = torch.IntTensor([dim, shift, W, H, batch])
        float_package = torch.FloatTensor([binsize])
        variables = [lut, x, int_package, float_package]
        ctx.save_for_backward(*variables)
        return lut, output
    @staticmethod
    def backward(ctx, lut_grad, x_grad):
        # Recover tensors and unpack the scalar metadata saved in forward().
        lut, x, int_package, float_package = ctx.saved_variables
        dim, shift, W, H, batch = int_package
        dim, shift, W, H, batch = int(dim), int(shift), int(W), int(H), int(batch)
        binsize = float(float_package[0])
        # Extension accumulates LUT gradients from the image gradient in place.
        assert 1 == trilinear.backward(x,
                                       x_grad,
                                       lut_grad,
                                       dim,
                                       shift,
                                       binsize,
                                       W,
                                       H,
                                       batch)
        return lut_grad, x_grad
    @staticmethod
    def count_map(lut_count, x):
        """Per-pixel map of LUT hit counts for image x (no autograd)."""
        x = x.contiguous()
        lut_count = lut_count.contiguous()
        #size = torch.Size((batch, 3, dim, dim, dim))
        output_map = x.new(x.size())
        # Zero the uninitialized buffer before the extension accumulates into it.
        output_map = output_map - output_map
        #print(output_map)
        # output_eff = x.new(torch.Size([x.size()[0],x.size()[2], x.size()[3]]))
        dim = lut_count.size()[-1]
        shift = dim ** 3
        binsize = 1.000001 / (dim - 1)
        W = x.size(2)
        H = x.size(3)
        batch = x.size(0)
        assert 1 == trilinear.map_count(lut_count,
                                        x,
                                        output_map,
                                        dim,
                                        shift,
                                        binsize,
                                        W,
                                        H,
                                        batch)
        return output_map
class TrilinearInterpolation(torch.nn.Module):
    """nn.Module facade over TrilinearInterpolationFunction.
    `fix_threshold` / `k_threshold` are stored and forwarded, although the
    underlying autograd function currently ignores them."""
    def __init__(self, fix_threshold=0.01, k_threshold=1):
        super(TrilinearInterpolation, self).__init__()
        self.fix_threshold = fix_threshold
        self.k_threshold = k_threshold
    def forward(self, lut_count, lut, x):
        """Apply the 3D LUT to image x via trilinear interpolation."""
        return TrilinearInterpolationFunction.apply(lut_count, lut, x, self.fix_threshold, self.k_threshold)
    def count_map(self, lut_count, x):
        """Per-pixel LUT hit-count map for image x."""
        return TrilinearInterpolationFunction.count_map(lut_count, x)
| 7,045 | 28.855932 | 108 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/hrnet_ocr.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._utils
from .ocr import SpatialOCR_Module, SpatialGather_Module
from .resnetv1b import BasicBlockV1b, BottleneckV1b
from iharm.utils.log import logger
relu_inplace = True
class HighResolutionModule(nn.Module):
    """One HRNet stage: `num_branches` parallel residual branches at
    different resolutions, followed by fuse layers that exchange information
    between every pair of branches (upsampling or strided-conv downsampling
    as needed)."""
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method,multi_scale_output=True,
                 norm_layer=nn.BatchNorm2d, align_corners=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(num_branches, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.norm_layer = norm_layer
        self.align_corners = align_corners
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=relu_inplace)
    def _check_branches(self, num_branches, num_blocks, num_inchannels, num_channels):
        # All per-branch configuration lists must match the branch count.
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            raise ValueError(error_msg)
        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            raise ValueError(error_msg)
        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Build one branch as a chain of `block` residual units; the first
        unit gets a 1x1 downsample projection when shapes change."""
        downsample = None
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(num_channels[branch_index] * block.expansion),
            )
        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride,
                            downsample=downsample, norm_layer=self.norm_layer))
        # Side effect: record this branch's new channel count for the fuse layers.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index],
                                norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        """Build fuse_layers[i][j]: transforms branch j's output to branch i's
        resolution/channels. j > i: 1x1 conv (upsampling happens in forward);
        j == i: identity (None); j < i: chain of stride-2 3x3 convs."""
        if self.num_branches == 1:
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower-resolution source: just match channels here.
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(in_channels=num_inchannels[j],
                                  out_channels=num_inchannels[i],
                                  kernel_size=1,
                                  bias=False),
                        self.norm_layer(num_inchannels[i])))
                elif j == i:
                    fuse_layer.append(None)
                else:
                    # Higher-resolution source: downsample with i-j stride-2 convs;
                    # only the last conv changes the channel count (and has no ReLU).
                    conv3x3s = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          kernel_size=3, stride=2, padding=1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          kernel_size=3, stride=2, padding=1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3),
                                nn.ReLU(inplace=relu_inplace)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        # Channel counts after branch construction (mutated in _make_one_branch).
        return self.num_inchannels
    def forward(self, x):
        """x: list of per-branch tensors; returns the fused list (length 1
        when multi_scale_output is False or there is a single branch)."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]
        # Run each branch independently.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                elif j > i:
                    # Channel-match then bilinearly upsample to branch i's size.
                    width_output = x[i].shape[-1]
                    height_output = x[i].shape[-2]
                    y = y + F.interpolate(
                        self.fuse_layers[i][j](x[j]),
                        size=[height_output, width_output],
                        mode='bilinear', align_corners=self.align_corners)
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
class HighResolutionNet(nn.Module):
    """HRNet backbone with an optional OCR (object-contextual representation)
    head enabled when ocr_width > 0.

    BUGFIX: `np.int` (used for the OCR input-channel count) was removed in
    NumPy >= 1.24 and raised AttributeError at construction; replaced with
    the builtin `int(sum(...))` — same value, no deprecated alias.
    """
    def __init__(self, width, num_classes, ocr_width=256, small=False,
                 norm_layer=nn.BatchNorm2d, align_corners=True):
        super(HighResolutionNet, self).__init__()
        self.norm_layer = norm_layer
        self.width = width
        self.ocr_width = ocr_width
        self.ocr_on = ocr_width > 0
        self.align_corners = align_corners
        # Stem: two stride-2 3x3 convs (4x total downsampling).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = norm_layer(64)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = norm_layer(64)
        self.relu = nn.ReLU(inplace=relu_inplace)
        num_blocks = 2 if small else 4
        stage1_num_channels = 64
        self.layer1 = self._make_layer(BottleneckV1b, 64, stage1_num_channels, blocks=num_blocks)
        stage1_out_channel = BottleneckV1b.expansion * stage1_num_channels
        # Stage 2: two parallel branches at widths [width, 2*width].
        self.stage2_num_branches = 2
        num_channels = [width, 2 * width]
        num_inchannels = [
            num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_inchannels)
        self.stage2, pre_stage_channels = self._make_stage(
            BasicBlockV1b, num_inchannels=num_inchannels, num_modules=1, num_branches=self.stage2_num_branches,
            num_blocks=2 * [num_blocks], num_channels=num_channels)
        # Stage 3: three branches.
        self.stage3_num_branches = 3
        num_channels = [width, 2 * width, 4 * width]
        num_inchannels = [
            num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_inchannels)
        self.stage3, pre_stage_channels = self._make_stage(
            BasicBlockV1b, num_inchannels=num_inchannels,
            num_modules=3 if small else 4, num_branches=self.stage3_num_branches,
            num_blocks=3 * [num_blocks], num_channels=num_channels)
        # Stage 4: four branches.
        self.stage4_num_branches = 4
        num_channels = [width, 2 * width, 4 * width, 8 * width]
        num_inchannels = [
            num_channels[i] * BasicBlockV1b.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_inchannels)
        self.stage4, pre_stage_channels = self._make_stage(
            BasicBlockV1b, num_inchannels=num_inchannels, num_modules=2 if small else 3,
            num_branches=self.stage4_num_branches,
            num_blocks=4 * [num_blocks], num_channels=num_channels)
        if self.ocr_on:
            # BUGFIX: was np.int(np.sum(...)); np.int no longer exists.
            last_inp_channels = int(sum(pre_stage_channels))
            ocr_mid_channels = 2 * ocr_width
            ocr_key_channels = ocr_width
            self.conv3x3_ocr = nn.Sequential(
                nn.Conv2d(last_inp_channels, ocr_mid_channels,
                          kernel_size=3, stride=1, padding=1),
                norm_layer(ocr_mid_channels),
                nn.ReLU(inplace=relu_inplace),
            )
            self.ocr_gather_head = SpatialGather_Module(num_classes)
            self.ocr_distri_head = SpatialOCR_Module(in_channels=ocr_mid_channels,
                                                     key_channels=ocr_key_channels,
                                                     out_channels=ocr_mid_channels,
                                                     scale=1,
                                                     dropout=0.05,
                                                     norm_layer=norm_layer,
                                                     align_corners=align_corners)
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Adapters between consecutive stages: channel-matching 3x3 convs for
        existing branches, stride-2 conv chains to spawn new (lower-res) ones."""
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  kernel_size=3,
                                  stride=1,
                                  padding=1,
                                  bias=False),
                        self.norm_layer(num_channels_cur_layer[i]),
                        nn.ReLU(inplace=relu_inplace)))
                else:
                    transition_layers.append(None)
            else:
                # New branch: downsample from the previous stage's last branch.
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(inchannels, outchannels,
                                  kernel_size=3, stride=2, padding=1, bias=False),
                        self.norm_layer(outchannels),
                        nn.ReLU(inplace=relu_inplace)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Standard ResNet stage: `blocks` residual units, 1x1 projection on
        the first unit when shapes change."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(inplanes, planes, stride,
                            downsample=downsample, norm_layer=self.norm_layer))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(inplanes, planes, norm_layer=self.norm_layer))
        return nn.Sequential(*layers)
    def _make_stage(self, block, num_inchannels,
                    num_modules, num_branches, num_blocks, num_channels,
                    fuse_method='SUM',
                    multi_scale_output=True):
        """Chain `num_modules` HighResolutionModules; returns (stage, channels)."""
        modules = []
        for i in range(num_modules):
            # multi_scale_output is only used last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output,
                                     norm_layer=self.norm_layer,
                                     align_corners=self.align_corners)
            )
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels
    def forward(self, x, mask=None, additional_features=None):
        """Return a 1-tuple of features: raw HRNet features when OCR is off,
        OCR-refined features (using `mask` as soft object regions) otherwise."""
        hrnet_feats = self.compute_hrnet_feats(x, additional_features)
        if not self.ocr_on:
            return hrnet_feats,
        ocr_feats = self.conv3x3_ocr(hrnet_feats)
        mask = nn.functional.interpolate(mask, size=ocr_feats.size()[2:], mode='bilinear', align_corners=True)
        context = self.ocr_gather_head(ocr_feats, mask)
        ocr_feats = self.ocr_distri_head(ocr_feats, context)
        return ocr_feats,
    def compute_hrnet_feats(self, x, additional_features, return_list=False):
        """Run stem + all four stages; concatenate the four branch outputs at
        the highest resolution unless return_list=True."""
        x = self.compute_pre_stage_features(x, additional_features)
        x = self.layer1(x)
        x_list = []
        for i in range(self.stage2_num_branches):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        x_list = []
        for i in range(self.stage3_num_branches):
            if self.transition2[i] is not None:
                if i < self.stage2_num_branches:
                    x_list.append(self.transition2[i](y_list[i]))
                else:
                    # New branch is spawned from the previous last branch.
                    x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        x_list = []
        for i in range(self.stage4_num_branches):
            if self.transition3[i] is not None:
                if i < self.stage3_num_branches:
                    x_list.append(self.transition3[i](y_list[i]))
                else:
                    x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        x = self.stage4(x_list)
        if return_list:
            return x
        # Upsampling: bring every branch to the highest resolution and concat.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(x0_h, x0_w),
                           mode='bilinear', align_corners=self.align_corners)
        x2 = F.interpolate(x[2], size=(x0_h, x0_w),
                           mode='bilinear', align_corners=self.align_corners)
        x3 = F.interpolate(x[3], size=(x0_h, x0_w),
                           mode='bilinear', align_corners=self.align_corners)
        return torch.cat([x[0], x1, x2, x3], 1)
    def compute_pre_stage_features(self, x, additional_features):
        """Stem forward; `additional_features` (if given) are added after the
        first conv+bn+relu."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if additional_features is not None:
            x = x + additional_features
        x = self.conv2(x)
        x = self.bn2(x)
        return self.relu(x)
    def load_pretrained_weights(self, pretrained_path=''):
        """Load HRNet classification weights, keeping only keys that exist in
        this model; exits the process when the file is missing."""
        model_dict = self.state_dict()
        if not os.path.exists(pretrained_path):
            print(f'\nFile "{pretrained_path}" does not exist.')
            print('You need to specify the correct path to the pre-trained weights.\n'
                  'You can download the weights for HRNet from the repository:\n'
                  'https://github.com/HRNet/HRNet-Image-Classification')
            exit(1)
        pretrained_dict = torch.load(pretrained_path, map_location={'cuda:0': 'cpu'})
        # Rename checkpoint keys to this module's naming scheme.
        pretrained_dict = {k.replace('last_layer', 'aux_head').replace('model.', ''): v for k, v in
                           pretrained_dict.items()}
        params_count = len(pretrained_dict)
        pretrained_dict = {k: v for k, v in pretrained_dict.items()
                           if k in model_dict.keys()}
        logger.info(f'Loaded {len(pretrained_dict)} of {params_count} pretrained parameters for HRNet')
        model_dict.update(pretrained_dict)
        self.load_state_dict(model_dict)
| 17,393 | 42.376559 | 111 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/ocr.py | import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
class SpatialGather_Module(nn.Module):
    """Aggregate pixel features into one descriptor per class.

    The (soft) class probability map provides the aggregation weights, so
    each class vector is a probability-weighted average of pixel features.
    Output shape: (B, C, K, 1) for K classes and C feature channels.
    """

    def __init__(self, cls_num=0, scale=1):
        super(SpatialGather_Module, self).__init__()
        self.cls_num = cls_num
        self.scale = scale  # temperature applied to the logits before softmax

    def forward(self, feats, probs):
        # Flatten spatial dims: probs -> (B, K, HW), feats -> (B, HW, C).
        n = probs.size(0)
        flat_probs = probs.view(n, probs.size(1), -1)
        flat_feats = feats.view(n, feats.size(1), -1).permute(0, 2, 1)
        # Normalize the weights over spatial positions.
        weights = F.softmax(self.scale * flat_probs, dim=2)
        # (B, K, HW) @ (B, HW, C) -> (B, K, C), then reshape to (B, C, K, 1).
        context = torch.matmul(weights, flat_feats).permute(0, 2, 1).unsqueeze(3)
        return context.contiguous()
class SpatialOCR_Module(nn.Module):
    """OCR context-mixing module.

    Attends each pixel to the global object representations, then fuses the
    attended context with the original per-pixel features via a 1x1 conv.
    """

    def __init__(self,
                 in_channels,
                 key_channels,
                 out_channels,
                 scale=1,
                 dropout=0.1,
                 norm_layer=nn.BatchNorm2d,
                 align_corners=True):
        super(SpatialOCR_Module, self).__init__()
        self.object_context_block = ObjectAttentionBlock2D(
            in_channels, key_channels, scale, norm_layer, align_corners)
        # The attended context is concatenated with the input features,
        # hence the doubled input width.
        self.conv_bn_dropout = nn.Sequential(
            nn.Conv2d(2 * in_channels, out_channels, kernel_size=1, padding=0, bias=False),
            nn.Sequential(norm_layer(out_channels), nn.ReLU(inplace=True)),
            nn.Dropout2d(dropout)
        )

    def forward(self, feats, proxy_feats):
        attended = self.object_context_block(feats, proxy_feats)
        fused = torch.cat([attended, feats], 1)
        return self.conv_bn_dropout(fused)
class ObjectAttentionBlock2D(nn.Module):
    '''
    Attention between pixel queries and object-region keys/values.

    Input:
        N X C X H X W feature map, plus proxy (object) features
    Parameters:
        in_channels : dimension of the input feature map
        key_channels : dimension after the key/query transform
        scale : downsampling factor applied to the pixel features
            (saves memory; the output is upsampled back)
        norm_layer : normalization layer factory
    Return:
        N X C X H X W
    '''

    def __init__(self,
                 in_channels,
                 key_channels,
                 scale=1,
                 norm_layer=nn.BatchNorm2d,
                 align_corners=True):
        super(ObjectAttentionBlock2D, self).__init__()
        self.scale = scale
        self.in_channels = in_channels
        self.key_channels = key_channels
        self.align_corners = align_corners
        self.pool = nn.MaxPool2d(kernel_size=(scale, scale))

        def conv_unit(cin, cout):
            # 1x1 conv followed by a nested (norm, ReLU) block; the nesting
            # mirrors the original layout so state_dict keys stay identical.
            return [
                nn.Conv2d(in_channels=cin, out_channels=cout,
                          kernel_size=1, stride=1, padding=0, bias=False),
                nn.Sequential(norm_layer(cout), nn.ReLU(inplace=True)),
            ]

        # Query/key transforms stack two conv units; value/output use one.
        self.f_pixel = nn.Sequential(*(conv_unit(in_channels, key_channels)
                                       + conv_unit(key_channels, key_channels)))
        self.f_object = nn.Sequential(*(conv_unit(in_channels, key_channels)
                                        + conv_unit(key_channels, key_channels)))
        self.f_down = nn.Sequential(*conv_unit(in_channels, key_channels))
        self.f_up = nn.Sequential(*conv_unit(key_channels, in_channels))

    def forward(self, x, proxy):
        batch_size, h, w = x.size(0), x.size(2), x.size(3)
        if self.scale > 1:
            x = self.pool(x)

        # Queries (B, HW, Ck) against keys (B, Ck, K); values (B, K, Ck).
        query = self.f_pixel(x).view(batch_size, self.key_channels, -1).permute(0, 2, 1)
        key = self.f_object(proxy).view(batch_size, self.key_channels, -1)
        value = self.f_down(proxy).view(batch_size, self.key_channels, -1).permute(0, 2, 1)

        # Scaled dot-product attention over the object regions.
        sim_map = torch.matmul(query, key) * (self.key_channels ** -.5)
        sim_map = F.softmax(sim_map, dim=-1)

        context = torch.matmul(sim_map, value).permute(0, 2, 1).contiguous()
        context = context.view(batch_size, self.key_channels, *x.size()[2:])
        context = self.f_up(context)
        if self.scale > 1:
            context = F.interpolate(input=context, size=(h, w),
                                    mode='bilinear', align_corners=self.align_corners)
        return context
| 5,740 | 39.429577 | 100 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/conv_autoencoder.py | import torch
from torch import nn as nn
from iharm.model.modeling.basic_blocks import ConvBlock
from iharm.model.ops import MaskedChannelAttention, FeaturesConnector
class ConvEncoder(nn.Module):
    """Plain convolutional encoder with optional backbone feature injection.

    Blocks 2..depth-1 may receive features from an external backbone via
    FeaturesConnector modules. Channel width doubles on every even-indexed
    block, capped at ``max_channels``.
    """

    def __init__(
        self,
        depth, ch,
        norm_layer, batchnorm_from, max_channels,
        backbone_from, backbone_channels=None, backbone_mode=''
    ):
        super(ConvEncoder, self).__init__()
        self.depth = depth
        self.backbone_from = backbone_from
        # Reverse so .pop() yields the backbone stages shallow-to-deep.
        backbone_channels = [] if backbone_channels is None else backbone_channels[::-1]

        in_channels = 4  # RGB + mask
        out_channels = ch
        self.block0 = ConvBlock(in_channels, out_channels,
                                norm_layer=norm_layer if batchnorm_from == 0 else None)
        self.block1 = ConvBlock(out_channels, out_channels,
                                norm_layer=norm_layer if 0 <= batchnorm_from <= 1 else None)
        self.blocks_channels = [out_channels, out_channels]

        self.blocks_connected = nn.ModuleDict()
        self.connectors = nn.ModuleDict()
        for block_i in range(2, depth):
            # Widen on even-indexed blocks only.
            if block_i % 2:
                in_channels = out_channels
            else:
                in_channels, out_channels = out_channels, min(2 * out_channels, max_channels)

            if 0 <= backbone_from <= block_i and len(backbone_channels):
                stage_channels = backbone_channels.pop()
                connector = FeaturesConnector(backbone_mode, in_channels, stage_channels, in_channels)
                self.connectors[f'connector{block_i}'] = connector
                in_channels = connector.output_channels

            self.blocks_connected[f'block{block_i}'] = ConvBlock(
                in_channels, out_channels,
                norm_layer=norm_layer if 0 <= batchnorm_from <= block_i else None,
                padding=int(block_i < depth - 1)  # last block: no padding
            )
            self.blocks_channels += [out_channels]

    def forward(self, x, backbone_features):
        backbone_features = [] if backbone_features is None else backbone_features[::-1]
        outputs = [self.block0(x)]
        outputs.append(self.block1(outputs[-1]))
        for block_i in range(2, self.depth):
            feed = outputs[-1]
            connector_name = f'connector{block_i}'
            if connector_name in self.connectors:
                feed = self.connectors[connector_name](feed, backbone_features.pop())
            outputs.append(self.blocks_connected[f'block{block_i}'](feed))
        return outputs[::-1]  # deepest features first
class DeconvDecoder(nn.Module):
    """Decoder of transposed-conv blocks with additive skip connections.

    When ``image_fusion`` is on, the final RGB is blended with the input
    image through a learned per-pixel attention map.
    """

    def __init__(self, depth, encoder_blocks_channels, norm_layer, attend_from=-1, image_fusion=False):
        super(DeconvDecoder, self).__init__()
        self.image_fusion = image_fusion
        self.deconv_blocks = nn.ModuleList()

        in_channels = encoder_blocks_channels.pop()
        out_channels = in_channels
        for d in range(depth):
            # Mirror the encoder widths; once exhausted, halve the channels.
            if encoder_blocks_channels:
                out_channels = encoder_blocks_channels.pop()
            else:
                out_channels = in_channels // 2
            self.deconv_blocks.append(SEDeconvBlock(
                in_channels, out_channels,
                norm_layer=norm_layer,
                padding=0 if d == 0 else 1,
                with_se=0 <= attend_from <= d
            ))
            in_channels = out_channels

        if self.image_fusion:
            self.conv_attention = nn.Conv2d(out_channels, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(out_channels, 3, kernel_size=1)

    def forward(self, encoder_outputs, image, mask=None):
        output = encoder_outputs[0]
        for block, skip_output in zip(self.deconv_blocks[:-1], encoder_outputs[1:]):
            output = block(output, mask) + skip_output
        output = self.deconv_blocks[-1](output, mask)

        if self.image_fusion:
            attention_map = torch.sigmoid(3.0 * self.conv_attention(output))
            return attention_map * image + (1.0 - attention_map) * self.to_rgb(output)
        return self.to_rgb(output)
class SEDeconvBlock(nn.Module):
    """Transposed-conv upsampling block with optional masked channel attention."""

    def __init__(
        self,
        in_channels, out_channels,
        kernel_size=4, stride=2, padding=1,
        norm_layer=nn.BatchNorm2d, activation=nn.ELU,
        with_se=False
    ):
        super(SEDeconvBlock, self).__init__()
        self.with_se = with_se
        norm = norm_layer(out_channels) if norm_layer is not None else nn.Identity()
        self.block = nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding),
            norm,
            activation(),
        )
        if self.with_se:
            self.se = MaskedChannelAttention(out_channels)

    def forward(self, x, mask=None):
        out = self.block(x)
        return self.se(out, mask) if self.with_se else out
| 4,940 | 37.601563 | 120 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/unet.py | import torch
from torch import nn as nn
from functools import partial
from iharm.model.modeling.basic_blocks import ConvBlock
from iharm.model.ops import FeaturesConnector
class UNetEncoder(nn.Module):
    """U-Net contracting path with optional backbone feature injection.

    Keeps the pre-pooling output of every block for the decoder skip
    connections; channel width doubles per block up to ``max_channels``.
    """

    def __init__(
        self,
        depth, ch,
        norm_layer, batchnorm_from, max_channels,
        backbone_from, backbone_channels=None, backbone_mode=''
    ):
        super(UNetEncoder, self).__init__()
        self.depth = depth
        self.backbone_from = backbone_from
        self.block_channels = []
        # Reverse so .pop() yields backbone stages shallow-to-deep.
        backbone_channels = [] if backbone_channels is None else backbone_channels[::-1]
        relu = partial(nn.ReLU, inplace=True)

        in_channels = 4  # RGB + mask
        out_channels = ch
        self.block0 = UNetDownBlock(
            in_channels, out_channels,
            norm_layer=norm_layer if batchnorm_from == 0 else None,
            activation=relu,
            pool=True, padding=1,
        )
        self.block_channels.append(out_channels)

        in_channels, out_channels = out_channels, min(2 * out_channels, max_channels)
        self.block1 = UNetDownBlock(
            in_channels, out_channels,
            norm_layer=norm_layer if 0 <= batchnorm_from <= 1 else None,
            activation=relu,
            pool=True, padding=1,
        )
        self.block_channels.append(out_channels)

        self.blocks_connected = nn.ModuleDict()
        self.connectors = nn.ModuleDict()
        for block_i in range(2, depth):
            in_channels, out_channels = out_channels, min(2 * out_channels, max_channels)
            if 0 <= backbone_from <= block_i and len(backbone_channels):
                stage_channels = backbone_channels.pop()
                connector = FeaturesConnector(backbone_mode, in_channels, stage_channels, in_channels)
                self.connectors[f'connector{block_i}'] = connector
                in_channels = connector.output_channels
            self.blocks_connected[f'block{block_i}'] = UNetDownBlock(
                in_channels, out_channels,
                norm_layer=norm_layer if 0 <= batchnorm_from <= block_i else None,
                activation=relu, padding=1,
                pool=block_i < depth - 1,  # deepest block keeps its resolution
            )
            self.block_channels.append(out_channels)

    def forward(self, x, backbone_features):
        backbone_features = [] if backbone_features is None else backbone_features[::-1]
        skips = []
        skip, pooled = self.block0(x)
        skips.append(skip)
        skip, pooled = self.block1(pooled)
        skips.append(skip)
        for block_i in range(2, self.depth):
            connector_name = f'connector{block_i}'
            if connector_name in self.connectors:
                pooled = self.connectors[connector_name](pooled, backbone_features.pop())
            skip, pooled = self.blocks_connected[f'block{block_i}'](pooled)
            skips.append(skip)
        return skips[::-1]  # deepest features first
class UNetDecoder(nn.Module):
    """U-Net expanding path; optionally blends the prediction with the input image.

    Returns both the RGB output and the last pre-RGB feature map.
    """

    def __init__(self, depth, encoder_blocks_channels, norm_layer,
                 attention_layer=None, attend_from=3, image_fusion=False):
        super(UNetDecoder, self).__init__()
        self.up_blocks = nn.ModuleList()
        self.image_fusion = image_fusion

        in_channels = encoder_blocks_channels.pop()
        out_channels = in_channels
        # The last encoder block doesn't pool, so only (depth - 1) up blocks are needed.
        for d in range(depth - 1):
            out_channels = encoder_blocks_channels.pop() if encoder_blocks_channels else in_channels // 2
            stage_attention = attention_layer if 0 <= attend_from <= d else None
            self.up_blocks.append(UNetUpBlock(
                in_channels, out_channels, out_channels,
                norm_layer=norm_layer, activation=partial(nn.ReLU, inplace=True),
                padding=1,
                attention_layer=stage_attention,
            ))
            in_channels = out_channels

        if self.image_fusion:
            self.conv_attention = nn.Conv2d(out_channels, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(out_channels, 3, kernel_size=1)

    def forward(self, encoder_outputs, input_image, mask):
        output = encoder_outputs[0]
        for block, skip_output in zip(self.up_blocks, encoder_outputs[1:]):
            output = block(output, skip_output, mask)
        output_map = output
        if self.image_fusion:
            attention_map = torch.sigmoid(3.0 * self.conv_attention(output))
            output = attention_map * input_image + (1.0 - attention_map) * self.to_rgb(output)
        else:
            output = self.to_rgb(output)
        return output, output_map
class UNetDownBlock(nn.Module):
    """Double conv followed by optional 2x2 max-pooling.

    Returns both the pre-pool features (for the skip connection) and the
    pooled features (for the next encoder block).
    """

    def __init__(self, in_channels, out_channels, norm_layer, activation, pool, padding):
        super(UNetDownBlock, self).__init__()
        self.convs = UNetDoubleConv(
            in_channels, out_channels,
            norm_layer=norm_layer, activation=activation, padding=padding,
        )
        self.pooling = nn.MaxPool2d(2, 2) if pool else nn.Identity()

    def forward(self, x):
        features = self.convs(x)
        return features, self.pooling(features)
class UNetUpBlock(nn.Module):
    """Upsample, merge with the encoder skip, optionally attend, then double conv."""

    def __init__(
        self,
        in_channels_decoder, in_channels_encoder, out_channels,
        norm_layer, activation, padding,
        attention_layer,
    ):
        super(UNetUpBlock, self).__init__()
        self.upconv = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            ConvBlock(
                in_channels_decoder, out_channels,
                kernel_size=3, stride=1, padding=1,
                norm_layer=None, activation=activation,
            )
        )
        self.convs = UNetDoubleConv(
            in_channels_encoder + out_channels, out_channels,
            norm_layer=norm_layer, activation=activation, padding=padding,
        )
        self.attention = (attention_layer(in_channels_encoder + out_channels, norm_layer, activation)
                          if attention_layer is not None else None)

    def forward(self, x, encoder_out, mask=None):
        merged = torch.cat([encoder_out, self.upconv(x)], dim=1)
        if self.attention is not None:
            merged = self.attention(merged, mask)
        return self.convs(merged)
class UNetDoubleConv(nn.Module):
    """Two consecutive conv-norm-activation blocks with identical padding."""

    def __init__(self, in_channels, out_channels, norm_layer, activation, padding):
        super(UNetDoubleConv, self).__init__()
        first = ConvBlock(
            in_channels, out_channels,
            kernel_size=3, stride=1, padding=padding,
            norm_layer=norm_layer, activation=activation,
        )
        second = ConvBlock(
            out_channels, out_channels,
            kernel_size=3, stride=1, padding=padding,
            norm_layer=norm_layer, activation=activation,
        )
        self.block = nn.Sequential(first, second)

    def forward(self, x):
        return self.block(x)
| 7,279 | 38.351351 | 110 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/resnetv1b.py | import torch
import torch.nn as nn
GLUON_RESNET_TORCH_HUB = 'rwightman/pytorch-pretrained-gluonresnet'
class BasicBlockV1b(nn.Module):
    """ResNet v1b basic residual block: two 3x3 convs plus an identity/projection skip."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 previous_dilation=1, norm_layer=nn.BatchNorm2d):
        super(BasicBlockV1b, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=previous_dilation, dilation=previous_dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # projection for the skip when shapes differ
        self.stride = stride

    def forward(self, x):
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class BottleneckV1b(nn.Module):
    """ResNet v1b bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4), plus skip."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,
                 previous_dilation=1, norm_layer=nn.BatchNorm2d):
        super(BottleneckV1b, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # projection for the skip when shapes differ
        self.stride = stride

    def forward(self, x):
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
class ResNetV1b(nn.Module):
    """ Pre-trained ResNetV1b Model, which produces the strides of 8 featuremaps at conv5.
    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    classes : int, default 1000
        Number of classification classes.
    dilated : bool, default False
        Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
        typically used in Semantic Segmentation.
    norm_layer : object
        Normalization layer used (default: :class:`nn.BatchNorm2d`)
    deep_stem : bool, default False
        Whether to replace the 7x7 conv1 with 3 3x3 convolution layers.
    avg_down : bool, default False
        Whether to use average pooling for projection skip connection between stages/downsample.
    final_drop : float, default 0.0
        Dropout ratio before the final classification layer.
    Reference:
        - He, Kaiming, et al. "Deep residual learning for image recognition."
        Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
        - Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
    """
    def __init__(self, block, layers, classes=1000, dilated=True, deep_stem=False, stem_width=32,
                 avg_down=False, final_drop=0.0, norm_layer=nn.BatchNorm2d):
        # With a deep stem the stem outputs 2*stem_width channels instead of
        # the classic 64; inplanes tracks the running input width of stages.
        self.inplanes = stem_width*2 if deep_stem else 64
        super(ResNetV1b, self).__init__()
        if not deep_stem:
            # Classic ResNet stem: a single 7x7 stride-2 convolution.
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        else:
            # v1s-style deep stem: three 3x3 convs (only the first is stride 2).
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False),
                norm_layer(stem_width),
                nn.ReLU(True),
                nn.Conv2d(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False),
                norm_layer(stem_width),
                nn.ReLU(True),
                nn.Conv2d(stem_width, 2*stem_width, kernel_size=3, stride=1, padding=1, bias=False)
            )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(True)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], avg_down=avg_down,
                                       norm_layer=norm_layer)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, avg_down=avg_down,
                                       norm_layer=norm_layer)
        if dilated:
            # Keep overall stride 8: replace striding in conv4/conv5 with
            # dilation 2/4 respectively (Yu & Koltun dilation strategy).
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2,
                                           avg_down=avg_down, norm_layer=norm_layer)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4,
                                           avg_down=avg_down, norm_layer=norm_layer)
        else:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                           avg_down=avg_down, norm_layer=norm_layer)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                           avg_down=avg_down, norm_layer=norm_layer)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.drop = None
        if final_drop > 0.0:
            self.drop = nn.Dropout(final_drop)
        self.fc = nn.Linear(512 * block.expansion, classes)
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
                    avg_down=False, norm_layer=nn.BatchNorm2d):
        """Build one residual stage of ``blocks`` blocks.

        Only the first block may stride/project; the rest keep the running
        ``self.inplanes`` width and the stage's dilation.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Skip-connection projection: v1b's avg_down variant pools before
            # a stride-1 1x1 conv instead of a strided 1x1 conv.
            downsample = []
            if avg_down:
                if dilation == 1:
                    downsample.append(
                        nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)
                    )
                else:
                    downsample.append(
                        nn.AvgPool2d(kernel_size=1, stride=1, ceil_mode=True, count_include_pad=False)
                    )
                downsample.extend([
                    nn.Conv2d(self.inplanes, out_channels=planes * block.expansion,
                              kernel_size=1, stride=1, bias=False),
                    norm_layer(planes * block.expansion)
                ])
                downsample = nn.Sequential(*downsample)
            else:
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, out_channels=planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    norm_layer(planes * block.expansion)
                )
        layers = []
        # The first block uses half the stage dilation (1 for stages with
        # dilation 1/2, 2 for the dilation-4 stage), a "multi-grid"-style trick.
        if dilation in (1, 2):
            layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample,
                                previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample,
                                previous_dilation=dilation, norm_layer=norm_layer))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation,
                                previous_dilation=dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Standard classification forward: stem, 4 stages, GAP, optional dropout, FC."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten (B, C, 1, 1) -> (B, C)
        if self.drop is not None:
            x = self.drop(x)
        x = self.fc(x)
        return x
def _safe_state_dict_filtering(orig_dict, model_dict_keys):
filtered_orig_dict = {}
for k, v in orig_dict.items():
if k in model_dict_keys:
filtered_orig_dict[k] = v
else:
print(f"[ERROR] Failed to load <{k}> in backbone")
return filtered_orig_dict
def resnet34_v1b(pretrained=False, **kwargs):
    """ResNet-34 v1b; optionally initialized from the Gluon pretrained weights."""
    model = ResNetV1b(BasicBlockV1b, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model_dict = model.state_dict()
        hub_model = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet34_v1b', pretrained=True)
        # Drop checkpoint keys absent from this model before loading.
        model_dict.update(_safe_state_dict_filtering(hub_model.state_dict(), model_dict.keys()))
        model.load_state_dict(model_dict)
    return model
def resnet50_v1s(pretrained=False, **kwargs):
    """ResNet-50 v1s (deep stem, stem width 64); optionally Gluon-pretrained."""
    model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, stem_width=64, **kwargs)
    if pretrained:
        model_dict = model.state_dict()
        hub_model = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet50_v1s', pretrained=True)
        # Drop checkpoint keys absent from this model before loading.
        model_dict.update(_safe_state_dict_filtering(hub_model.state_dict(), model_dict.keys()))
        model.load_state_dict(model_dict)
    return model
def resnet101_v1s(pretrained=False, **kwargs):
    """ResNet-101 v1s (deep stem, stem width 64); optionally Gluon-pretrained."""
    model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, stem_width=64, **kwargs)
    if pretrained:
        model_dict = model.state_dict()
        hub_model = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet101_v1s', pretrained=True)
        # Drop checkpoint keys absent from this model before loading.
        model_dict.update(_safe_state_dict_filtering(hub_model.state_dict(), model_dict.keys()))
        model.load_state_dict(model_dict)
    return model
def resnet152_v1s(pretrained=False, **kwargs):
    """ResNet-152 v1s (deep stem, stem width 64); optionally Gluon-pretrained."""
    model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], deep_stem=True, stem_width=64, **kwargs)
    if pretrained:
        model_dict = model.state_dict()
        hub_model = torch.hub.load(GLUON_RESNET_TORCH_HUB, 'gluon_resnet152_v1s', pretrained=True)
        # Drop checkpoint keys absent from this model before loading.
        model_dict.update(_safe_state_dict_filtering(hub_model.state_dict(), model_dict.keys()))
        model.load_state_dict(model_dict)
    return model
| 10,805 | 38.01083 | 112 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/model/modeling/dbp.py | import torch
from torch import nn as nn
from iharm.model.modeling.basic_blocks import ConvBlock, DBDownsample, DBUpsample, UpBlock, DownBlock
import torch.nn.functional as F
class SimpleRefine(nn.Module):
    """Light refinement head blending a network output with a LUT-based output.

    Concatenates both candidates (plus optional intermediate features), runs
    two conv-norm-act layers, and either predicts RGB directly or fuses with
    the composite image through a learned per-pixel attention map.
    """

    def __init__(self, feature_channels=0, in_channel=6, inner_channel=32,
                 norm_layer=nn.BatchNorm2d, activation=nn.ELU, image_fusion=True):
        super(SimpleRefine, self).__init__()
        self.image_fusion = image_fusion
        self.in_channel = in_channel
        self.feature_channels = feature_channels

        def norm(ch):
            return norm_layer(ch) if norm_layer is not None else nn.Identity()

        self.refine_block = nn.Sequential(
            nn.Conv2d(feature_channels + in_channel, inner_channel, kernel_size=3, stride=1, padding=1),
            norm(inner_channel),
            activation(),
            nn.Conv2d(inner_channel, inner_channel, kernel_size=3, stride=1, padding=1),
            norm(inner_channel),
            activation(),
        )
        if self.image_fusion:
            self.conv_attention = nn.Conv2d(inner_channel, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(inner_channel, 3, 1, 1, 0)

    def forward(self, ssam_out, lut_out, comp, ssam_feature=None):
        if self.feature_channels > 0:
            net_in = torch.cat([ssam_out, ssam_feature, lut_out], dim=1)
        else:
            net_in = torch.cat([ssam_out, lut_out], dim=1)
        feat = self.refine_block(net_in)
        if self.image_fusion:
            alpha = torch.sigmoid(3.0 * self.conv_attention(feat))
            return alpha * comp + (1.0 - alpha) * self.to_rgb(feat)
        return self.to_rgb(feat)
class NewRes(nn.Module):
    """Refinement head operating at an arbitrary target resolution.

    All inputs are resized to ``target_resolution`` before being concatenated
    and refined; returns both the intermediate feature map and the RGB output.
    """

    def __init__(self, feature_channels=32, in_channel=7, inner_channel=32,
                 norm_layer=nn.BatchNorm2d, activation=nn.ELU, image_fusion=True):
        super(NewRes, self).__init__()
        self.image_fusion = image_fusion

        def norm(ch):
            return norm_layer(ch) if norm_layer is not None else nn.Identity()

        self.block = nn.Sequential(
            nn.Conv2d(feature_channels + in_channel, inner_channel, kernel_size=3, stride=1, padding=1),
            norm(inner_channel),
            activation(),
            nn.Conv2d(inner_channel, inner_channel, kernel_size=3, stride=1, padding=1),
            norm(inner_channel),
            activation(),
        )
        if self.image_fusion:
            self.conv_attention = nn.Conv2d(inner_channel, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(inner_channel, 3, 1, 1, 0)

    def forward(self, ssam_output, comp, mask, ssam_features, lut_output, target_resolution):
        def resize(t):
            return F.interpolate(t, size=target_resolution, mode='bilinear')

        ssam_in = resize(ssam_output)
        comp = resize(comp)
        mask = resize(mask)
        ssam_features = resize(ssam_features)
        lut_in = resize(lut_output)

        output_map = self.block(torch.cat([ssam_in, lut_in, mask, ssam_features], dim=1))
        if self.image_fusion:
            alpha = torch.sigmoid(3.0 * self.conv_attention(output_map))
            output = alpha * comp + (1.0 - alpha) * self.to_rgb(output_map)
        else:
            output = self.to_rgb(output_map)
        return output_map, output
class DBPNetv1(nn.Module):
    """Back-projection refinement over the low-res network output and LUT output.

    Each encoder stage is a down/up back-projection pair; decoder stages
    consume the encoder skips additively. (The original code had two
    byte-identical branches for the last encoder stage; they are collapsed
    here with identical behavior.)
    """

    def __init__(self, in_channels=7, feat_in_channels=32, minf=24, depth=2, image_fusion=False):
        super(DBPNetv1, self).__init__()
        self.hdconv_blocks = nn.ModuleList()
        self.hddeconv_blocks = nn.ModuleList()
        nhdf = 32  # base width before the min(minf) cap
        self.upsample = nn.Upsample(scale_factor=2)
        self.depth = depth
        self.image_fusion = image_fusion

        out_channels = minf
        self.in_conv = nn.Sequential(
            ConvBlock(in_channels, out_channels, kernel_size=3, stride=1, padding=1, norm_layer=None),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
        )
        self.feat_conv = nn.Sequential(
            ConvBlock(feat_in_channels, out_channels, kernel_size=3, stride=1, padding=1, norm_layer=None),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
        )

        out_chs = []
        for d in range(depth):
            out_channels = min(int(nhdf * (2 ** d)), minf)
            out_chs.append(out_channels)
            self.hdconv_blocks.append(nn.Sequential(
                DBDownsample(out_channels, out_channels, ks=4, activation='elu', norm='sn',
                             activation_first=False),
                DBUpsample(out_channels, out_channels, ks=4, activation='elu', norm='sn',
                           activation_first=False),
            ))
        for d in range(depth - 1):
            self.hddeconv_blocks.append(nn.Sequential(
                DBDownsample(out_chs[d], out_chs[d], ks=4, activation='elu', norm='sn',
                             activation_first=False),
                DBUpsample(out_chs[d], out_chs[d], ks=4, activation='elu', norm='sn',
                           activation_first=False),
            ))

        out_channels = out_chs[0]
        if self.image_fusion:
            self.conv_attention = nn.Conv2d(out_channels, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(out_channels, 3, 1, 1, 0)

    def forward(self, lr_output, hr_mask, hr_comp, extra_feat=None, lut_hr_output=None, target_resolution=(1024, 1024)):
        up_output = F.interpolate(lr_output, size=target_resolution, mode='bilinear')
        extra_feat = F.interpolate(extra_feat, size=target_resolution, mode='bilinear')
        stacked = torch.cat([up_output, lut_hr_output, hr_mask], dim=1).contiguous()
        hx = self.in_conv(stacked) + self.feat_conv(extra_feat)

        skips = []
        for block in self.hdconv_blocks:
            hx = block(hx)
            skips.append(hx)

        prev_out = skips.pop()
        for block in self.hddeconv_blocks[::-1]:
            prev_out = F.interpolate(prev_out, size=skips[-1].shape[2:], mode='bilinear')
            prev_out = block(prev_out + skips.pop())

        if self.image_fusion:
            attention_map = torch.sigmoid(3.0 * self.conv_attention(prev_out))
            output = attention_map * hr_comp + (1.0 - attention_map) * self.to_rgb(prev_out)
        else:
            output = self.to_rgb(prev_out)
        return output
class DBPNet_official(nn.Module):
    """Single back-projection stage (DBPN-style) refinement head."""

    def __init__(self, num_channels=3, base_filter=64, feat=256, image_fusion=True):
        super(DBPNet_official, self).__init__()
        kernel, stride, padding = 6, 2, 2
        self.image_fusion = image_fusion
        # Initial feature extraction from the images and the extra feature map.
        self.in_conv = ConvBlock(7, feat, 3, 1, 1, activation=nn.PReLU, norm_layer=None)
        self.fea_conv = ConvBlock(32, feat, 3, 1, 1, activation=nn.PReLU, norm_layer=None)
        self.feat1 = ConvBlock(feat, base_filter, 1, 1, 0, activation=nn.PReLU, norm_layer=None)
        # One up/down back-projection pair.
        self.up1 = UpBlock(base_filter, kernel, stride, padding)
        self.down1 = DownBlock(base_filter, kernel, stride, padding)
        if self.image_fusion:
            self.conv_attention = nn.Conv2d(base_filter, 1, kernel_size=1)
        self.to_rgb = nn.Conv2d(base_filter, 3, 1, 1, 0)

        # Kaiming init for all convolutions. 'Conv2d' is a substring of
        # 'ConvTranspose2d', so this single check covers both (matching the
        # original, whose elif branch was unreachable for the same reason).
        for m in self.modules():
            if 'Conv2d' in m.__class__.__name__:
                torch.nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, lr_output, hr_mask, hr_comp, extra_feat=None, lut_hr_output=None, target_resolution=(1024, 1024)):
        upsampled_output = F.interpolate(lr_output, size=target_resolution, mode='bilinear')
        extra_feat = F.interpolate(extra_feat, size=target_resolution, mode='bilinear')
        images = torch.cat((upsampled_output, lut_hr_output, hr_mask), 1)
        x = self.feat1(self.in_conv(images) + self.fea_conv(extra_feat))
        d1 = self.down1(self.up1(x))
        if self.image_fusion:
            attention_map = torch.sigmoid(3.0 * self.conv_attention(d1))
            return attention_map * hr_comp + (1.0 - attention_map) * self.to_rgb(d1)
        return self.to_rgb(d1)
class BasicConv(nn.Module):
    """Conv2d followed by optional BatchNorm2d and optional ReLU."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Optional post-conv layers; `None` means "skip" in forward().
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        for layer in (self.bn, self.relu):
            if layer is not None:
                out = layer(out)
        return out
class ChannelPool(nn.Module):
    """Compress the channel dimension to two planes: per-pixel channel max
    and per-pixel channel mean (the standard CBAM-style spatial pooling)."""

    def forward(self, x):
        max_plane = torch.max(x, 1)[0].unsqueeze(1)
        mean_plane = torch.mean(x, 1).unsqueeze(1)
        return torch.cat((max_plane, mean_plane), dim=1)
class spatial_att_refine(nn.Module):
    """Blend the upsampled network output with the LUT-based output using
    per-branch spatial attention.

    Each branch concatenates an RGB image with the mask, compresses channels
    to (max, mean) planes, and predicts a single-channel sigmoid gate with a
    shared 7x7 convolution. The two gated images are summed.

    Fix: `F.sigmoid` (deprecated since PyTorch 0.4.1 and removed from the
    functional API docs) is replaced by the equivalent `torch.sigmoid`.
    """

    def __init__(self):
        super(spatial_att_refine, self).__init__()
        kernel_size = 7
        self.compress = ChannelPool()
        # 2 input planes (channel max + channel mean) -> 1 attention plane.
        # The same conv is shared by both branches, as in the original.
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size-1) // 2, relu=False)

    def forward(self, lr_output, hr_mask, hr_comp, extra_feat=None, lut_hr_output=None, target_resolution=(1024,1024)):
        """Return the attention-weighted sum of the two candidate images.

        `hr_comp` and `extra_feat` are accepted for interface compatibility
        with sibling refinement modules but are not used here.
        """
        upsampled_output = F.interpolate(lr_output, size=target_resolution, mode='bilinear')
        rrt_images = torch.cat((upsampled_output, hr_mask), 1)
        ppt_images = torch.cat((lut_hr_output, hr_mask), 1)
        rrt_compress = self.compress(rrt_images)
        rrt_out = self.spatial(rrt_compress)
        rrt_scale = torch.sigmoid(rrt_out)  # single-channel gate, broadcasting over RGB
        ppt_compress = self.compress(ppt_images)
        ppt_out = self.spatial(ppt_compress)
        ppt_scale = torch.sigmoid(ppt_out)
        output = upsampled_output*rrt_scale+lut_hr_output*ppt_scale
        return output
| 11,477 | 45.469636 | 154 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/inference/predictor.py | import torch
from iharm.inference.transforms import NormalizeTensor, PadToDivisor, ToTensor, AddFlippedTensor
class Predictor(object):
    """Runs a harmonization network on one (image, mask) pair.

    Wraps the model with invertible pre/post-processing: pad spatial dims to
    a size the encoder depth can handle, convert to normalized tensors, and
    optionally average predictions over a horizontal flip (TTA).
    """

    def __init__(self, net, device, with_flip=False,
                 mean=(.485, .456, .406), std=(.229, .224, .225)):
        self.device = device
        self.net = net.to(self.device)
        self.net.eval()
        # Networks exposing `depth` require H and W divisible by 2**(depth+1).
        size_divisor = 2 ** (net.depth + 1) if hasattr(net, 'depth') else 1
        mean = torch.tensor(mean, dtype=torch.float32)
        std = torch.tensor(std, dtype=torch.float32)
        # Applied in order for transform(), in reverse for inv_transform().
        self.transforms = [
            PadToDivisor(divisor=size_divisor, border_mode=0),
            ToTensor(self.device),
            NormalizeTensor(mean, std, self.device),
        ]
        if with_flip:
            self.transforms.append(AddFlippedTensor())

    def predict(self, image, mask, return_numpy=True):
        """Harmonize `image` under `mask`; returns the prediction clamped to
        [0, 255], as a numpy array unless `return_numpy` is False."""
        with torch.no_grad():
            for tr in self.transforms:
                image, mask = tr.transform(image, mask)
            prediction = self.net(image, mask)['images']
            for tr in reversed(self.transforms):
                prediction = tr.inv_transform(prediction)
            prediction = torch.clamp(prediction, 0, 255)
        return prediction.cpu().numpy() if return_numpy else prediction
| 1,432 | 32.325581 | 96 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/inference/evaluation.py | from time import time
from tqdm import trange
import torch
def evaluate_dataset(dataset, predictor, metrics_hub):
    """Evaluate `predictor` on every sample of `dataset`, accumulating
    per-sample prediction time and harmonization metrics into `metrics_hub`.
    """
    for idx in trange(len(dataset), desc=f'Testing on {metrics_hub.name}'):
        sample = dataset.augment_sample(dataset.get_sample(idx))
        mask = sample['object_mask']

        start = time()
        prediction = predictor.predict(sample['image'], mask, return_numpy=False)
        torch.cuda.synchronize()  # make the timing cover the full GPU run
        metrics_hub.update_time(time() - start)

        target = torch.as_tensor(sample['target_image'], dtype=torch.float32).to(predictor.device)
        mask_tensor = torch.as_tensor(mask, dtype=torch.float32).to(predictor.device)
        with torch.no_grad():
            metrics_hub.compute_and_add(prediction, target, mask_tensor)
| 841 | 39.095238 | 104 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/iharm/inference/transforms.py | import cv2
import torch
from collections import namedtuple
class EvalTransform:
    """Abstract invertible inference-time preprocessing step.

    Subclasses implement `transform` (forward pass over image+mask) and
    `inv_transform` (undo the effect on a predicted image).
    """

    def __init__(self):
        pass

    def transform(self, image, mask):
        # Must be overridden: return the transformed (image, mask) pair.
        raise NotImplementedError

    def inv_transform(self, image):
        # Must be overridden: return the image with the transform undone.
        raise NotImplementedError
class PadToDivisor(EvalTransform):
    """
    Pad image sides so that height and width are divisible by `divisor`.

    Args:
        divisor (int): desirable image size divisor
        border_mode (OpenCV flag): OpenCV border mode.
        fill_value (int, float, list of int, list of float): padding value if border_mode is cv2.BORDER_CONSTANT.
    """
    # (top, bottom, left, right) — the argument order cv2.copyMakeBorder expects.
    PadParams = namedtuple('PadParams', ['top', 'bottom', 'left', 'right'])

    def __init__(self, divisor, border_mode=cv2.BORDER_CONSTANT, fill_value=0):
        super().__init__()
        self.divisor = divisor
        self.border_mode = border_mode
        self.fill_value = fill_value
        self._pads = None  # remembered between transform() and inv_transform()

    def transform(self, image, mask):
        top, bottom = self._get_dim_padding(image.shape[0])
        left, right = self._get_dim_padding(image.shape[1])
        self._pads = PadToDivisor.PadParams(top, bottom, left, right)
        padded_image = cv2.copyMakeBorder(image, *self._pads, self.border_mode, value=self.fill_value)
        padded_mask = cv2.copyMakeBorder(mask, *self._pads, self.border_mode, value=self.fill_value)
        return padded_image, padded_mask

    def inv_transform(self, image):
        assert self._pads is not None,\
            'Something went wrong, inv_transform(...) should be called after transform(...)'
        return self._remove_padding(image)

    def _get_dim_padding(self, dim_size):
        # Total padding needed for this dimension, split as evenly as possible
        # (the extra pixel, if any, goes on the trailing side).
        total = (self.divisor - dim_size % self.divisor) % self.divisor
        return total // 2, total - total // 2

    def _remove_padding(self, tensor):
        height, width = tensor.shape[:2]
        return tensor[self._pads.top:height - self._pads.bottom,
                      self._pads.left:width - self._pads.right, :]
class NormalizeTensor(EvalTransform):
    """In-place channel-wise normalization of an NCHW tensor."""

    def __init__(self, mean, std, device):
        super().__init__()
        # Shape (1, 3, 1, 1) broadcasts over batch and spatial dimensions.
        self.mean = torch.as_tensor(mean).reshape(1, 3, 1, 1).to(device)
        self.std = torch.as_tensor(std).reshape(1, 3, 1, 1).to(device)

    def transform(self, image, mask):
        # (image - mean) / std, in place; mask passes through untouched.
        image.sub_(self.mean).div_(self.std)
        return image, mask

    def inv_transform(self, image):
        # image * std + mean, in place.
        image.mul_(self.std).add_(self.mean)
        return image
class ToTensor(EvalTransform):
    """HWC image / HW mask -> batched, [0, 1]-scaled NCHW / N1HW tensors."""

    def __init__(self, device):
        super().__init__()
        self.device = device

    def transform(self, image, mask):
        image_tensor = torch.as_tensor(image, device=self.device, dtype=torch.float32)
        mask_tensor = torch.as_tensor(mask, device=self.device)
        image_tensor.unsqueeze_(0)
        mask_tensor.unsqueeze_(0).unsqueeze_(0)
        # HWC -> CHW, and scale pixel values from [0, 255] to [0, 1].
        return image_tensor.permute(0, 3, 1, 2) / 255.0, mask_tensor

    def inv_transform(self, image):
        # Drop the batch dim, CHW -> HWC, back to the [0, 255] range.
        image.squeeze_(0)
        return 255 * image.permute(1, 2, 0)
class AddFlippedTensor(EvalTransform):
    """Horizontal-flip test-time augmentation: stack a flipped copy along the
    batch dimension, then average the prediction with its flipped-back twin."""

    def transform(self, image, mask):
        def stack_with_flip(tensor):
            # dim 3 is width for NCHW tensors.
            return torch.cat((tensor, torch.flip(tensor, dims=(3,))), dim=0)
        return stack_with_flip(image), stack_with_flip(mask)

    def inv_transform(self, image):
        # Average the plain prediction with the un-flipped flipped prediction.
        return 0.5 * (image[:1] + torch.flip(image[1:], dims=(3,)))
| 3,462 | 31.980952 | 114 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/tridistribute/setup.py | from setuptools import setup
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
# Build the `tridistribute` extension: CUDA binding + kernel when a GPU
# toolchain is available, plain C++ fallback otherwise.
if torch.cuda.is_available():
    print('Including CUDA code.')
    extension = CUDAExtension('tridistribute', [
        'src/tridistribute_cuda.cpp',
        'src/tridistribute_kernel.cu',
    ])
else:
    print('NO CUDA is found. Fall back to CPU.')
    extension = CppExtension('tridistribute', ['src/tridistribute.cpp'])

setup(
    name='tridistribute',
    ext_modules=[extension],
    cmdclass={'build_ext': BuildExtension},
)
| 701 | 29.521739 | 81 | py |
Video-Harmonization-Dataset-HYouTube | Video-Harmonization-Dataset-HYouTube-master/CO2Net/trilinear/setup.py | from setuptools import setup
import torch
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
# Build the `trilinear` extension: CUDA binding + kernel when a GPU
# toolchain is available, plain C++ fallback otherwise.
if torch.cuda.is_available():
    print('Including CUDA code.')
    extension = CUDAExtension('trilinear', [
        'src/trilinear_cuda.cpp',
        'src/trilinear_kernel.cu',
    ])
else:
    print('NO CUDA is found. Fall back to CPU.')
    extension = CppExtension('trilinear', ['src/trilinear.cpp'])

setup(
    name='trilinear',
    ext_modules=[extension],
    cmdclass={'build_ext': BuildExtension},
)
| 673 | 28.304348 | 81 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.