repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
AT-on-AD | AT-on-AD-main/test_cifar_vgg.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
# VGG layer specifications: integers are conv output-channel counts,
# 'M' inserts a 2x2 max-pool (see CifarNet._make_layers below).
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class CifarNet(nn.Module):
    """VGG-style convolutional network with a 2-way linear classifier head.

    The convolutional stack is built from the module-level ``cfg`` table;
    the head assumes the flattened feature map has 512 elements.
    """

    def __init__(self, vgg_name):
        super().__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 2)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, spec_list):
        """Translate a cfg list into a Sequential of conv/BN/ReLU and pool layers."""
        layers = []
        channels = 3
        for spec in spec_list:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                channels = spec
        # Trailing 1x1 average pool is a no-op kept for parity with stock VGG code.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
class CifarDataset(torch.utils.data.Dataset):
    """Tensor-backed dataset that binarizes labels: class 3 -> 0, all else -> 1."""

    def __init__(self, X, y):
        super().__init__()
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.size(0)

    def __getitem__(self, index):
        # Original label 3 maps to class 0; every other label to class 1.
        label = int(self.y[index].item() != 3)
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated) and stderr."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # File handler first, then console handler, both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
def parse_args():
    """Command-line options for the CIFAR VGG evaluation run."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--seed', type=int, default=42)
    add('--R', type=int, default=1)
    add('--hidden', type=int, default=200)
    add('--lr', type=float, default=0.001)
    add('--bs', type=int, default=64)
    add('--n-epochs', type=int, default=20)
    add('--adv', action='store_true', default=False)
    add('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    add('--norm-scale', type=float)
    return parser.parse_args()
# ---- script: evaluate a trained CIFAR VGG checkpoint on the test split ----
args = parse_args()
# Run folder name encodes every hyper-parameter, mirroring the training script.
folder_main = 'record_cifar'
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
if not osp.exists(folder_sub):
    # NOTE(review): if the folder is freshly created here, the checkpoint load
    # below must fail — confirm this script is only run after training.
    os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading — always loads the R=1 data file, regardless of args.R.
data = torch.load(osp.join('data_cifar', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = CifarDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64  # not referenced below in this file
input_size = 3072
num_classes = 2
model = CifarNet(vgg_name='VGG11')
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Returns (total correct, per-class correct [class0, class1], number of
    samples, mean per-batch loss). Inputs are moved to the GPU; uses the
    module-level `criterion`.
    """
    batch_losses = []
    n_correct = 0
    per_class = [0, 0]
    n_seen = 0
    for X, y in data_loader:
        X = X.cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        _, preds = torch.max(logits.data, 1)
        n_seen += y.size(0)
        hits = preds == y
        n_correct += hits.sum().item()
        for cls in (0, 1):
            per_class[cls] += torch.logical_and(hits, y == cls).sum().item()
    return n_correct, per_class, n_seen, np.mean(batch_losses)
# Evaluate and log the one-line summary (this exact format is parsed by
# process_data.py's parse()).
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 4,759 | 29.709677 | 186 | py |
AT-on-AD | AT-on-AD-main/test_syn.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class MyLR(nn.Module):
    """Logistic-regression model: a single linear layer over flat inputs."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
class Dataset(torch.utils.data.Dataset):
    """Wraps feature/label arrays as float/long tensors for a DataLoader."""

    def __init__(self, X, y):
        super().__init__()
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.size(0)

    def __getitem__(self, index):
        return self.X[index], self.y[index]
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated) and stderr."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # File handler first, then console handler, both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
def parse_args():
    """Command-line options for evaluating a model on a synthetic-style dataset."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--seed', type=int, default=42)
    add('--R', type=int, default=1)
    add('--lr', type=float, default=0.001)
    add('--bs', type=int, default=64)
    add('--n-epochs', type=int, default=20)
    add('--adv', action='store_true', default=False)
    add('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    add('--data-type', type=str, default='syn',
        choices=['syn', 'cauchy', 'cauchy_2', 'levy_1.5'])
    add('--norm-scale', type=float)
    return parser.parse_args()
# ---- script: evaluate a trained linear model on the chosen dataset ----
args = parse_args()
# 'syn' uses the bare 'record' folder; other data types get a suffixed folder.
if args.data_type == 'syn':
    folder_main = 'record'
else:
    folder_main = f'record_{args.data_type}'
# Run folder name encodes every hyper-parameter, mirroring the training script.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    # NOTE(review): a freshly created folder cannot contain model.pth — this
    # script presumably runs only after training; verify.
    os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading — always the R=1 data file, regardless of args.R.
data = torch.load(osp.join(f'data_{args.data_type}', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = Dataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64  # not referenced below in this file
input_size = 100
num_classes = 2
model = MyLR(input_size, num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Returns (total correct, per-class correct [class0, class1], number of
    samples, mean per-batch loss). Inputs are moved to the GPU; uses the
    module-level `criterion`.
    """
    batch_losses = []
    n_correct = 0
    per_class = [0, 0]
    n_seen = 0
    for X, y in data_loader:
        X = X.cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        _, preds = torch.max(logits.data, 1)
        n_seen += y.size(0)
        hits = preds == y
        n_correct += hits.sum().item()
        for cls in (0, 1):
            per_class[cls] += torch.logical_and(hits, y == cls).sum().item()
    return n_correct, per_class, n_seen, np.mean(batch_losses)
# Evaluate and log the one-line summary (this exact format is parsed by
# process_data.py's parse()).
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,879 | 28.846154 | 182 | py |
AT-on-AD | AT-on-AD-main/train_syn.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class MyLR(nn.Module):
    """Logistic-regression model: a single linear layer over flat inputs."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
class Dataset(torch.utils.data.Dataset):
    """Wraps feature/label arrays as float/long tensors for a DataLoader."""

    def __init__(self, X, y):
        super().__init__()
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.size(0)

    def __getitem__(self, index):
        return self.X[index], self.y[index]
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated) and stderr."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # File handler first, then console handler, both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
def parse_args():
    """Command-line options for training a linear model on a synthetic-style dataset."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--seed', type=int, default=42)
    add('--R', type=int, default=1)
    add('--lr', type=float, default=0.001)
    add('--bs', type=int, default=64)
    add('--n-epochs', type=int, default=20)
    add('--adv', action='store_true', default=False)
    add('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    add('--data-type', type=str, default='syn',
        choices=['syn', 'cauchy', 'cauchy_2', 'levy_1.5'])
    add('--norm-scale', type=float)
    return parser.parse_args()
# TODO:
# 5. may add support for lr scheduler
# 1. set up logging
args = parse_args()
# 'syn' uses the bare 'record' folder; other data types get a suffixed folder.
if args.data_type == 'syn':
    folder_main = 'record'
else:
    folder_main = f'record_{args.data_type}'
# Run folder name encodes every hyper-parameter of this run.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading — train/valid/test splits from the per-R data file.
data = torch.load(osp.join(f'data_{args.data_type}', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = Dataset(X_train, y_train)
val_set = Dataset(X_val, y_val)
test_set = Dataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 100
num_classes = 2
model = MyLR(input_size, num_classes).cuda()
criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
losses = []  # per-step training losses, saved to disk at the end of the run
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Returns (total correct, per-class correct [class0, class1], number of
    samples, mean per-batch loss). Inputs are moved to the GPU; uses the
    module-level `criterion`.
    """
    batch_losses = []
    n_correct = 0
    per_class = [0, 0]
    n_seen = 0
    for X, y in data_loader:
        X = X.cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        _, preds = torch.max(logits.data, 1)
        n_seen += y.size(0)
        hits = preds == y
        n_correct += hits.sum().item()
        for cls in (0, 1):
            per_class[cls] += torch.logical_and(hits, y == cls).sum().item()
    return n_correct, per_class, n_seen, np.mean(batch_losses)
# ---- training loop with optional adversarial training and early stopping ----
if args.adv:
    # One-step gradient attacks from advertorch, selected by norm type.
    if args.norm_type == 'linf':
        adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=None, clip_max=None)
    elif args.norm_type == 'l2':
        adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=None, clip_max=None)
best_val_loss = 1e10
best_model = None
t = time.time()
for epoch in (pbar := tqdm(range(args.n_epochs))):
    # 1. training
    model.train()
    for i, (X, y) in enumerate(train_loader):
        X = X.cuda()
        y = y.cuda()
        optimizer.zero_grad()
        if args.adv:
            # Train on perturbed inputs instead of clean ones.
            X_adv = adversary.perturb(X, y)
            output = model(X_adv)
        else:
            output = model(X)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # if (i+1) % 50 == 0:
        #     print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
        #         % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
    # 2. validation — keep a deep copy of the best-val-loss model.
    model.eval()
    val_correct, val_correct_class, val_total, val_loss = test(val_loader)
    pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = copy.deepcopy(model)
        best_epoch = epoch
    # Early stopping: abort after 50 epochs with no validation improvement.
    if epoch - best_epoch == 50:
        break
logger.info(f'training finishes using {time.time() - t} seconds!')
# Restore the best checkpoint and report final val/test metrics; these log
# lines are parsed later by process_data.py.
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 6,544 | 29.584112 | 182 | py |
AT-on-AD | AT-on-AD-main/process_data.py | import argparse
import numpy as np
import os
import os.path as osp
import torch
def parse_args():
    """Select which dataset's logs to aggregate."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset', type=str, default='syn',
        choices=['syn', 'mnist', 'cifar', 'fmnist', 'cauchy', 'cauchy_2', 'levy_1.5'])
    return parser.parse_args()
def parse(s, mode='val'):
    """Parse one metric summary line from a train/test log.

    Expects the substring
    ``{mode}_correct = A, {mode}_correct_class = [B, C], {mode}_total = D, {mode}_loss = E``
    (as written by the training/eval scripts' logger.info calls) and returns
    ``((A, B, C, D), E)`` with A-D ints and E float.

    The original implementation sliced the line with hard-coded offsets
    (p2-2, p3-3, ...) and silently misparsed anything slightly off; an
    anchored regex is robust and fails loudly instead.

    Raises:
        ValueError: if the expected summary fields are not found in *s*.
    """
    import re  # local import: keeps this file's top-level imports unchanged
    pattern = (rf'{mode}_correct = (\d+), '
               rf'{mode}_correct_class = \[(\d+), (\d+)\], '
               rf'{mode}_total = (\d+), '
               rf'{mode}_loss = (\S+)')
    m = re.search(pattern, s)
    if m is None:
        raise ValueError(f'cannot parse {mode} summary from line: {s!r}')
    correct, class0, class1, total = (int(g) for g in m.groups()[:4])
    return (correct, class0, class1, total), float(m.group(5))
# ---- configuration: the hyper-parameter grids the training runs swept over ----
epoch = 500
bs = 64
R_list = [1,2,5,10]
seed_list = [2,22,42,62,82]
lr_std_list = [0.1, 0.05, 0.01, 0.005, 0.001,0.0005,0.0002,0.0001]
lr_adv_list = [0.1, 0.05, 0.01, 0.005, 0.001,0.0005,0.0002,0.0001]
# Per-R class counts used as denominators for the imbalanced / balanced test
# accuracies, and the per-R grand totals (row sums of total_imb_class).
total_imb_class = np.asarray([[1000,1000],[1000,500],[1000,200],[1000,100]])
total_b_class = np.asarray([[1000,1000],[1000,1000],[1000,1000],[1000,1000]])
total_all = np.asarray([2000, 1500, 1200, 1100])
normtype_list = ['l2', 'linf']
args = parse_args()
# Per-dataset record-folder suffix and the attack-strength grid per norm type.
if args.dataset == 'syn':
    suffix = ''
    norm_scale_list = {'l2': [5.0, 3.75, 2.5], 'linf': [1.0, 0.75, 0.5]} # synthetic dataset
elif args.dataset == 'cauchy':
    suffix = '_cauchy'
    norm_scale_list = {'l2': [23.425, 17.5688, 11.7125], 'linf': [14.403, 10.8023, 7.2015]} # synthetic dataset
elif args.dataset == 'cauchy_2':
    suffix = '_cauchy_2'
    norm_scale_list = {'l2': [70.0, 52.5, 35.0], 'linf': [53.0, 39.75, 26.5]}
elif args.dataset == 'levy_1.5':
    suffix = '_levy_1.5'
    norm_scale_list = {'l2': [2.94, 2.205, 1.47], 'linf': [0.555, 0.4163, 0.2775]}
elif args.dataset == 'mnist':
    suffix = '_mnist'
    norm_scale_list = {'l2': [2.6956,2.0217,1.3478], 'linf': [0.4341, 0.3256, 0.2171]} #
elif args.dataset == 'cifar':
    suffix = '_cifar'
    # norm_scale_list = {'l2': [0.7822, 0.5867, 0.3911, ], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
    # norm_scale_list = {'l2': [2.3468, 1.7601, 1.1734, ], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
    # norm_scale_list = {'l2': [1.7601, 1.1734, 0.7822], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
    # norm_scale_list = {'l2': [1.1734, 0.9778, 0.7822], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
    norm_scale_list = {'l2': [0.9778, 0.8800, 0.7822], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
    lr_adv_list = [0.005, 0.001,0.0005,0.0002,0.0001]  # cifar sweeps a smaller adv lr grid
elif args.dataset == 'fmnist':
    suffix = '_fmnist'
    norm_scale_list = {'l2': [3.2306,2.4230,1.6153], 'linf': [0.2889,0.2166,0.1444]}
    # fmnist swaps which class is the minority (columns flipped vs the default).
    total_imb_class = np.asarray([[1000,1000],[500,1000],[200,1000],[100,1000]])
else:
    raise NotImplementedError(f'dataset = {args.dataset} not implemented!')
# 1. collect raw data for std training
# Result arrays are indexed [R, seed, lr]; the trailing 4 holds the parsed
# (correct, class0-correct, class1-correct, total) tuple.
len_lr_std = len(lr_std_list)
len_lr_adv = len(lr_adv_list)
val_result = np.zeros((4,5,len_lr_std,4), dtype=np.int64)
test_imb_result = np.zeros((4,5,len_lr_std,4), dtype=np.int64)
test_b_result = np.zeros((4,5,len_lr_std,4), dtype=np.int64)
val_loss = np.zeros((4,5,len_lr_std))
test_imb_loss = np.zeros((4,5,len_lr_std))
test_b_loss = np.zeros((4,5,len_lr_std))
for i, R in enumerate(R_list):
    for k, seed in enumerate(seed_list):
        for j, lr in enumerate(lr_std_list):
            file_name = osp.join(osp.join(f'record{suffix}', f'train-std_R-{R}_lr-{lr}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'train.log')
            print(i,j,k, file_name)
            with open(file_name) as f:
                lines = f.readlines()
            # train.log: third-from-last line is the val summary, second-from-last the test summary.
            r1, r2 = parse(lines[-3], 'val')
            val_result[i][k][j] = r1
            val_loss[i][k][j] = r2
            r1, r2 = parse(lines[-2], 'test')
            test_imb_result[i][k][j] = r1
            test_imb_loss[i][k][j] = r2
            # test.log (balanced evaluation): last line holds the test summary.
            file_name = osp.join(osp.join(f'record{suffix}', f'train-std_R-{R}_lr-{lr}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'test.log')
            with open(file_name) as f:
                lines = f.readlines()
            r1, r2 = parse(lines[-1], 'test')
            test_b_result[i][k][j] = r1
            test_b_loss[i][k][j] = r2
# Index of the best (lowest val loss) lr for each (R, seed) pair.
indmin = np.argmin(val_loss, axis=-1)
def get_std_accs(indmin, test_result, total_class):
    """For each (R, seed), take the result at the best-val-loss lr and compute
    per-class correct counts, per-class accuracies, and overall accuracy.

    Uses the module-level `total_all` as the overall denominator; prints each
    selection for manual inspection.
    """
    acc_class = np.zeros((4, 5, 2))
    correct_class = np.zeros((4, 5, 2), dtype=np.int64)
    acc = np.zeros((4, 5))
    for r_idx in range(4):
        for s_idx in range(5):
            best = test_result[r_idx][s_idx][indmin[r_idx][s_idx]]
            print(best)
            print(best[1:-1] / total_class[r_idx])
            print(best[0] / total_all[r_idx])
            correct_class[r_idx][s_idx] = best[1:-1]
            acc_class[r_idx][s_idx] = best[1:-1] / total_class[r_idx]
            acc[r_idx][s_idx] = best[0] / total_all[r_idx]
    return acc_class, correct_class, acc
acc_imb_class, correct_imb_class, acc_imb = get_std_accs(indmin, test_imb_result, total_imb_class)
acc_b_class, correct_b_class, acc_b = get_std_accs(indmin, test_b_result, total_b_class)
# 2. adversarial training
# Same collection as above, with two extra leading axes:
# [norm-type, norm-scale, R, seed, lr].
adv_val_result = np.zeros((2,3,4,5,len_lr_adv,4), dtype=np.int64)
adv_test_imb_result = np.zeros((2,3,4,5,len_lr_adv,4), dtype=np.int64)
adv_test_b_result = np.zeros((2,3,4,5,len_lr_adv,4), dtype=np.int64)
adv_val_loss = np.zeros((2,3,4,5,len_lr_adv))
adv_test_imb_loss = np.zeros((2,3,4,5,len_lr_adv))
adv_test_b_loss = np.zeros((2,3,4,5,len_lr_adv))
for k, normtype in enumerate(normtype_list):
    for l, normscale in enumerate(norm_scale_list[normtype]):
        for i, R in enumerate(R_list):
            for p, seed in enumerate(seed_list):
                for j, lr in enumerate(lr_adv_list):
                    print(k,l,i,p,j)
                    file_name = osp.join(osp.join(f'record{suffix}', f'train-adv_R-{R}_lr-{lr}_normtype-{normtype}_normscale-{normscale}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'train.log')
                    print(file_name)
                    with open(file_name) as f:
                        lines = f.readlines()
                    r1, r2 = parse(lines[-3], 'val')
                    adv_val_result[k][l][i][p][j] = r1
                    adv_val_loss[k][l][i][p][j] = r2
                    r1, r2 = parse(lines[-2], 'test')
                    adv_test_imb_result[k][l][i][p][j] = r1
                    adv_test_imb_loss[k][l][i][p][j] = r2
                    file_name = osp.join(osp.join(f'record{suffix}', f'train-adv_R-{R}_lr-{lr}_normtype-{normtype}_normscale-{normscale}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'test.log')
                    print(file_name)
                    with open(file_name) as f:
                        lines = f.readlines()
                    r1, r2 = parse(lines[-1], 'test')
                    adv_test_b_result[k][l][i][p][j] = r1
                    adv_test_b_loss[k][l][i][p][j] = r2
# Index of the best (lowest val loss) adv lr per (norm-type, scale, R, seed).
indmin_adv = np.argmin(adv_val_loss, axis=-1)
def get_adv_accs(indmin_adv, adv_test_result, total_class):
    """For each (norm-type, scale, R, seed) cell, take the result at the
    best-val-loss lr and compute per-class counts/accuracies and overall
    accuracy. Uses the module-level `total_all`; prints each selection.
    """
    adv_acc_class = np.zeros((2, 3, 4, 5, 2))
    adv_correct_class = np.zeros((2, 3, 4, 5, 2), dtype=np.int64)
    adv_acc = np.zeros((2, 3, 4, 5))
    for nt in range(2):
        for ns in range(3):
            for r_idx in range(4):
                for s_idx in range(5):
                    best = adv_test_result[nt][ns][r_idx][s_idx][indmin_adv[nt][ns][r_idx][s_idx]]
                    print(best)
                    print(best[1:-1] / total_class[r_idx])
                    print(best[0] / total_all[r_idx])
                    adv_correct_class[nt][ns][r_idx][s_idx] = best[1:-1]
                    adv_acc_class[nt][ns][r_idx][s_idx] = best[1:-1] / total_class[r_idx]
                    adv_acc[nt][ns][r_idx][s_idx] = best[0] / total_all[r_idx]
    return adv_acc_class, adv_correct_class, adv_acc
adv_acc_imb_class, adv_correct_imb_class, adv_acc_imb = get_adv_accs(indmin_adv, adv_test_imb_result, total_imb_class)
adv_acc_b_class, adv_correct_b_class, adv_acc_b = get_adv_accs(indmin_adv, adv_test_b_result, total_b_class)
# 3. compute AD — the gap between the two per-class balanced-test accuracies.
ad_class = acc_b_class[:,:,0] - acc_b_class[:,:,1]
adv_ad_class = adv_acc_b_class[:,:,:,:,0] - adv_acc_b_class[:,:,:,:,1]
if args.dataset == 'fmnist':
    # fmnist flips which class is the minority (see total_imb_class above),
    # so flip the sign to keep the gap's orientation consistent.
    ad_class = -ad_class
    adv_ad_class = -adv_ad_class
# diff = np.abs(ad_class - adv_ad_class)
diff = adv_ad_class - ad_class
# Mean and standard error over the 5 seeds (last axis).
diff_ave = np.mean(diff, axis=-1)
diff_std = np.std(diff, axis=-1) / np.sqrt(5)
print('diff_ave', diff_ave)
print('diff_std', diff_std)
# 4. compute ACC diff — standard-vs-adversarial overall accuracy gap.
diff_acc = acc_imb - adv_acc_imb
diff_acc_ave = np.mean(diff_acc, axis=-1)
diff_acc_std = np.std(diff_acc, axis=-1) / np.sqrt(5)
# 4. write to disk — everything collected and derived, in one checkpoint file.
write_dir = f'result{suffix}'
if not osp.exists(write_dir):
    os.makedirs(write_dir)
write_path = osp.join(write_dir, 'all_result.pth')
torch.save({
    'val_result': val_result,
    'val_loss': val_loss,
    'test_imb_result': test_imb_result,
    'test_b_result': test_b_result,
    'test_imb_loss': test_imb_loss,
    'test_b_loss': test_b_loss,
    'adv_val_result': adv_val_result,
    'adv_val_loss': adv_val_loss,
    'adv_test_imb_result': adv_test_imb_result,
    'adv_test_b_result': adv_test_b_result,
    'adv_test_imb_loss': adv_test_imb_loss,
    'adv_test_b_loss': adv_test_b_loss,
    'correct_imb_class': correct_imb_class,
    'correct_b_class': correct_b_class,
    'acc_imb_class': acc_imb_class,
    'acc_b_class': acc_b_class,
    'acc_imb': acc_imb,
    'acc_b': acc_b,
    'ad_class': ad_class,
    'adv_correct_imb_class': adv_correct_imb_class,
    'adv_correct_b_class': adv_correct_b_class,
    'adv_acc_imb_class': adv_acc_imb_class,
    'adv_acc_b_class': adv_acc_b_class,
    'adv_acc_imb': adv_acc_imb,
    'adv_acc_b': adv_acc_b,
    'adv_ad_class': adv_ad_class,
    'indmin': indmin,
    'indmin_adv': indmin_adv,
    'diff': diff,
    'diff_ave': diff_ave,
    'diff_std': diff_std,
    'diff_acc': diff_acc,
    'diff_acc_ave': diff_acc_ave,
    'diff_acc_std': diff_acc_std,
}, write_path)
| 10,126 | 33.56314 | 186 | py |
AT-on-AD | AT-on-AD-main/test_cifar.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class CifarLR(nn.Module):
    """Two-layer MLP (linear -> ReLU -> linear) over flattened CIFAR inputs."""

    def __init__(self, input_size, hidden=200, num_classes=2):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden, num_classes)

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
class CifarDataset(torch.utils.data.Dataset):
    """Tensor-backed dataset that binarizes labels: class 3 -> 0, all else -> 1."""

    def __init__(self, X, y):
        super().__init__()
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.size(0)

    def __getitem__(self, index):
        # Original label 3 maps to class 0; every other label to class 1.
        label = int(self.y[index].item() != 3)
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated) and stderr."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # File handler first, then console handler, both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
def parse_args():
    """Command-line options for the CIFAR MLP evaluation run."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--seed', type=int, default=42)
    add('--R', type=int, default=1)
    add('--hidden', type=int, default=200)
    add('--lr', type=float, default=0.001)
    add('--bs', type=int, default=64)
    add('--n-epochs', type=int, default=20)
    add('--adv', action='store_true', default=False)
    add('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    add('--norm-scale', type=float)
    return parser.parse_args()
# ---- script: evaluate a trained CIFAR MLP checkpoint on the test split ----
args = parse_args()
# Run folder name encodes every hyper-parameter, mirroring the training script.
folder_main = 'record_cifar'
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    # NOTE(review): a freshly created folder cannot contain model.pth — this
    # script presumably runs only after training; verify.
    os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading — always the R=1 data file, regardless of args.R.
data = torch.load(osp.join('data_cifar', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = CifarDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64  # not referenced below in this file
input_size = 3072
num_classes = 2
model = CifarLR(input_size, hidden=args.hidden, num_classes=num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Flattens each batch to (N, 3072) before moving it to the GPU. Returns
    (total correct, per-class correct [class0, class1], number of samples,
    mean per-batch loss). Uses the module-level `criterion`.
    """
    batch_losses = []
    n_correct = 0
    per_class = [0, 0]
    n_seen = 0
    for X, y in data_loader:
        X = X.view(-1, 3072).cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        _, preds = torch.max(logits.data, 1)
        n_seen += y.size(0)
        hits = preds == y
        n_correct += hits.sum().item()
        for cls in (0, 1):
            per_class[cls] += torch.logical_and(hits, y == cls).sum().item()
    return n_correct, per_class, n_seen, np.mean(batch_losses)
# Evaluate and log the one-line summary (this exact format is parsed by
# process_data.py's parse()).
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,914 | 28.43609 | 182 | py |
AT-on-AD | AT-on-AD-main/test_mnist.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class MyLR(nn.Module):
    """Logistic-regression model: a single linear layer over flat inputs."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
class MnistDataset(torch.utils.data.Dataset):
    """Tensor-backed dataset that binarizes labels: digit 1 -> 0, all else -> 1."""

    def __init__(self, X, y):
        super().__init__()
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.size(0)

    def __getitem__(self, index):
        # Original label 1 maps to class 0; every other label to class 1.
        label = int(self.y[index].item() != 1)
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated) and stderr."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # File handler first, then console handler, both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
def parse_args():
    """Command-line options for the MNIST evaluation run."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--seed', type=int, default=42)
    add('--R', type=int, default=1)
    add('--lr', type=float, default=0.001)
    add('--bs', type=int, default=64)
    add('--n-epochs', type=int, default=20)
    add('--adv', action='store_true', default=False)
    add('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    add('--norm-scale', type=float)
    return parser.parse_args()
# ---- script: evaluate a trained MNIST linear model on the test split ----
args = parse_args()
# Run folder name encodes every hyper-parameter, mirroring the training script.
folder_main = 'record_mnist'
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    # NOTE(review): a freshly created folder cannot contain model.pth — this
    # script presumably runs only after training; verify.
    os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading — always the R=1 data file, regardless of args.R.
data = torch.load(osp.join('data_mnist', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = MnistDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64  # not referenced below in this file
input_size = 784
num_classes = 2
model = MyLR(input_size, num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Flattens each batch to (N, 784) before moving it to the GPU. Returns
    (total correct, per-class correct [class0, class1], number of samples,
    mean per-batch loss). Uses the module-level `criterion`.
    """
    batch_losses = []
    n_correct = 0
    per_class = [0, 0]
    n_seen = 0
    for X, y in data_loader:
        X = X.view(-1, 784).cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        _, preds = torch.max(logits.data, 1)
        n_seen += y.size(0)
        hits = preds == y
        n_correct += hits.sum().item()
        for cls in (0, 1):
            per_class[cls] += torch.logical_and(hits, y == cls).sum().item()
    return n_correct, per_class, n_seen, np.mean(batch_losses)
# Evaluate and log the one-line summary (this exact format is parsed by
# process_data.py's parse()).
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,683 | 28.007874 | 182 | py |
AT-on-AD | AT-on-AD-main/train_fmnist.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class MyLR(nn.Module):
    """Logistic-regression model: a single linear layer over flat inputs."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
class MnistDataset(torch.utils.data.Dataset):
    """Tensor-backed dataset that binarizes labels: digit 1 -> 0, all else -> 1."""

    def __init__(self, X, y):
        super().__init__()
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.size(0)

    def __getitem__(self, index):
        # Original label 1 maps to class 0; every other label to class 1.
        label = int(self.y[index].item() != 1)
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated) and stderr."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # File handler first, then console handler, both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
def parse_args():
    """Parse command-line options for FashionMNIST training.

    --R selects the data file (data_R_<R>.pth); --adv enables adversarial
    training, with --norm-type/--norm-scale describing the attack budget.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--R', type=int, default=1)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--bs', type=int, default=64)
    parser.add_argument('--n-epochs', type=int, default=20)
    parser.add_argument('--adv', action='store_true', default=False)
    parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    # No default: stays None unless supplied (only meaningful with --adv).
    parser.add_argument('--norm-scale', type=float)
    return parser.parse_args()
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_fmnist'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_fmnist', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = MnistDataset(X_train, y_train)
val_set = MnistDataset(X_val, y_val)
test_set = MnistDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 784
num_classes = 2
model = MyLR(input_size, num_classes).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
losses = []
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Returns (correct, per-class correct [class0, class1], total samples,
    mean batch loss). Runs under torch.no_grad(): evaluation never
    backpropagates, so tracking gradients only wastes memory.
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    with torch.no_grad():
        for X, y in data_loader:
            X = X.view(-1, 784).cuda()  # flatten 28x28 images for the linear model
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            _, predicted = torch.max(output.data, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted==y, y==0).sum().item()
            correct_class[1] += torch.logical_and(predicted==y, y==1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
if args.adv:
if args.norm_type == 'linf':
adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
elif args.norm_type == 'l2':
adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# if args.adv:
# if args.norm_type == 'linf':
# adversary = LinfPGDAttack(
# model, loss_fn=criterion, eps=args.norm_scale,
# nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
# targeted=False)
# elif args.norm_type == 'l2':
# adversary = L2PGDAttack(
# model, loss_fn=criterion, eps=args.norm_scale,
# nb_iter=50, eps_iter=args.norm_scale/50*5/4, rand_init=True, clip_min=0.0, clip_max=1.0,
# targeted=False)
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
for epoch in (pbar := tqdm(range(args.n_epochs))):
# 1. training
model.train()
for i, (X, y) in enumerate(train_loader):
X = X.view(-1, 784).cuda()
y = y.cuda()
optimizer.zero_grad()
if args.adv:
X_adv = adversary.perturb(X, y)
output = model(X_adv)
else:
output = model(X)
loss = criterion(output, y)
loss.backward()
optimizer.step()
losses.append(loss.item())
# if (i+1) % 50 == 0:
# print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
# % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
# 2. validation
model.eval()
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = copy.deepcopy(model)
best_epoch = epoch
if epoch - best_epoch == 50:
break
logger.info(f'training finishes using {time.time() - t} seconds!')
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 6,889 | 30.176471 | 182 | py |
AT-on-AD | AT-on-AD-main/train_cifar_vgg.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
# Per-variant layer plans: integers are conv output widths, 'M' is a max-pool.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class CifarNet(nn.Module):
    """VGG-style convolutional feature extractor with a 2-way linear head."""

    def __init__(self, vgg_name):
        super().__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 2)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))

    def _make_layers(self, plan):
        """Translate a cfg plan into conv/BN/ReLU stacks and 2x2 max-pools."""
        layers = []
        channels = 3
        for spec in plan:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
class CifarDataset(torch.utils.data.Dataset):
    """Binary CIFAR wrapper: original label 3 becomes class 0, every other
    label becomes class 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, index):
        label = 0 if self.y[index] == 3 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s : %(message)s')
fileHandler = logging.FileHandler(log_file, mode='w')
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
l.addHandler(streamHandler)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--R', type=int, default=1)
parser.add_argument('--hidden', type=int, default=200)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--bs', type=int, default=64)
parser.add_argument('--n-epochs', type=int, default=20)
parser.add_argument('--adv', action='store_true', default=False)
parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
parser.add_argument('--norm-scale', type=float)
return parser.parse_args()
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_cifar'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_cifar', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = CifarDataset(X_train, y_train)
val_set = CifarDataset(X_val, y_val)
test_set = CifarDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 3072
num_classes = 2
model = CifarNet(vgg_name='VGG11').cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
losses = []
def test(data_loader):
eval_losses = []
correct = 0
correct_class = [0, 0]
total = 0
for X, y in data_loader:
X = X.cuda()
y = y.cuda()
output = model(X)
loss = criterion(output, y)
_, predicted = torch.max(output.data, 1)
eval_losses.append(loss.item())
total += y.size(0)
correct += (predicted == y).sum().item()
correct_class[0] += torch.logical_and(predicted==y, y==0).sum().item()
correct_class[1] += torch.logical_and(predicted==y, y==1).sum().item()
return correct, correct_class, total, np.mean(eval_losses)
# if args.adv:
# if args.norm_type == 'linf':
# adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# elif args.norm_type == 'l2':
# adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
if args.adv:
if args.norm_type == 'linf':
adversary = LinfPGDAttack(
model, loss_fn=criterion, eps=args.norm_scale,
nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
targeted=False)
elif args.norm_type == 'l2':
adversary = L2PGDAttack(
model, loss_fn=criterion, eps=args.norm_scale,
nb_iter=50, eps_iter=args.norm_scale/50*2.5, rand_init=True, clip_min=0.0, clip_max=1.0,
targeted=False)
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
for epoch in (pbar := tqdm(range(args.n_epochs))):
# 1. training
model.train()
for i, (X, y) in enumerate(train_loader):
X = X.cuda()
y = y.cuda()
optimizer.zero_grad()
if args.adv:
X_adv = adversary.perturb(X, y)
output = model(X_adv)
else:
output = model(X)
loss = criterion(output, y)
loss.backward()
optimizer.step()
losses.append(loss.item())
# if (i+1) % 50 == 0:
# print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
# % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
# 2. validation
model.eval()
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = copy.deepcopy(model)
best_epoch = epoch
if epoch - best_epoch == 50:
break
logger.info(f'training finishes using {time.time() - t} seconds!')
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 7,950 | 31.060484 | 186 | py |
AT-on-AD | AT-on-AD-main/train_cifar.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class CifarLR(nn.Module):
    """One-hidden-layer MLP (linear -> ReLU -> linear) producing class logits."""

    def __init__(self, input_size, hidden=200, num_classes=2):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden, num_classes)

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
class CifarDataset(torch.utils.data.Dataset):
def __init__(self, X, y):
self.X = torch.FloatTensor(X)
self.y = torch.LongTensor(y)
def __len__(self):
return len(self.y)
def __getitem__(self, index):
return self.X[index], 0 if self.y[index]==3 else 1
def setup_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s : %(message)s')
fileHandler = logging.FileHandler(log_file, mode='w')
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
l.addHandler(streamHandler)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--R', type=int, default=1)
parser.add_argument('--hidden', type=int, default=200)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--bs', type=int, default=64)
parser.add_argument('--n-epochs', type=int, default=20)
parser.add_argument('--adv', action='store_true', default=False)
parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
parser.add_argument('--norm-scale', type=float)
return parser.parse_args()
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_cifar'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_cifar', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = CifarDataset(X_train, y_train)
val_set = CifarDataset(X_val, y_val)
test_set = CifarDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 3072
num_classes = 2
model = CifarLR(input_size, hidden=args.hidden, num_classes=num_classes).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
losses = []
def test(data_loader):
eval_losses = []
correct = 0
correct_class = [0, 0]
total = 0
for X, y in data_loader:
X = X.view(-1, 3072).cuda()
y = y.cuda()
output = model(X)
loss = criterion(output, y)
_, predicted = torch.max(output.data, 1)
eval_losses.append(loss.item())
total += y.size(0)
correct += (predicted == y).sum().item()
correct_class[0] += torch.logical_and(predicted==y, y==0).sum().item()
correct_class[1] += torch.logical_and(predicted==y, y==1).sum().item()
return correct, correct_class, total, np.mean(eval_losses)
# if args.adv:
# if args.norm_type == 'linf':
# adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# elif args.norm_type == 'l2':
# adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
if args.adv:
if args.norm_type == 'linf':
adversary = LinfPGDAttack(
model, loss_fn=criterion, eps=args.norm_scale,
nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
targeted=False)
elif args.norm_type == 'l2':
adversary = L2PGDAttack(
model, loss_fn=criterion, eps=args.norm_scale,
nb_iter=50, eps_iter=args.norm_scale/50*2.5, rand_init=True, clip_min=0.0, clip_max=1.0,
targeted=False)
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
for epoch in (pbar := tqdm(range(args.n_epochs))):
# 1. training
model.train()
for i, (X, y) in enumerate(train_loader):
X = X.view(-1, 3072).cuda()
y = y.cuda()
optimizer.zero_grad()
if args.adv:
X_adv = adversary.perturb(X, y)
output = model(X_adv)
else:
output = model(X)
loss = criterion(output, y)
loss.backward()
optimizer.step()
losses.append(loss.item())
# if (i+1) % 50 == 0:
# print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
# % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
# 2. validation
model.eval()
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = copy.deepcopy(model)
best_epoch = epoch
if epoch - best_epoch == 50:
break
logger.info(f'training finishes using {time.time() - t} seconds!')
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 7,120 | 30.50885 | 182 | py |
AT-on-AD | AT-on-AD-main/train_mnist.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class MyLR(nn.Module):
def __init__(self, input_size, num_classes):
super(MyLR, self).__init__()
self.linear = nn.Linear(input_size, num_classes)
def forward(self, x):
out = self.linear(x)
return out
class MnistDataset(torch.utils.data.Dataset):
def __init__(self, X, y):
self.X = torch.FloatTensor(X)
self.y = torch.LongTensor(y)
def __len__(self):
return len(self.y)
def __getitem__(self, index):
return self.X[index], 0 if self.y[index]==1 else 1
def setup_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s : %(message)s')
fileHandler = logging.FileHandler(log_file, mode='w')
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
l.addHandler(streamHandler)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--R', type=int, default=1)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--bs', type=int, default=64)
parser.add_argument('--n-epochs', type=int, default=20)
parser.add_argument('--adv', action='store_true', default=False)
parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
parser.add_argument('--norm-scale', type=float)
return parser.parse_args()
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_mnist'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_mnist', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = MnistDataset(X_train, y_train)
val_set = MnistDataset(X_val, y_val)
test_set = MnistDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 784
num_classes = 2
model = MyLR(input_size, num_classes).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
losses = []
def test(data_loader):
eval_losses = []
correct = 0
correct_class = [0, 0]
total = 0
for X, y in data_loader:
X = X.view(-1, 784).cuda()
y = y.cuda()
output = model(X)
loss = criterion(output, y)
_, predicted = torch.max(output.data, 1)
eval_losses.append(loss.item())
total += y.size(0)
correct += (predicted == y).sum().item()
correct_class[0] += torch.logical_and(predicted==y, y==0).sum().item()
correct_class[1] += torch.logical_and(predicted==y, y==1).sum().item()
return correct, correct_class, total, np.mean(eval_losses)
if args.adv:
if args.norm_type == 'linf':
adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
elif args.norm_type == 'l2':
adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# if args.adv:
# if args.norm_type == 'linf':
# adversary = LinfPGDAttack(
# model, loss_fn=criterion, eps=args.norm_scale,
# nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
# targeted=False)
# elif args.norm_type == 'l2':
# adversary = L2PGDAttack(
# model, loss_fn=criterion, eps=args.norm_scale,
# nb_iter=50, eps_iter=args.norm_scale/50*5/4, rand_init=True, clip_min=0.0, clip_max=1.0,
# targeted=False)
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
for epoch in (pbar := tqdm(range(args.n_epochs))):
# 1. training
model.train()
for i, (X, y) in enumerate(train_loader):
X = X.view(-1, 784).cuda()
y = y.cuda()
optimizer.zero_grad()
if args.adv:
X_adv = adversary.perturb(X, y)
output = model(X_adv)
else:
output = model(X)
loss = criterion(output, y)
loss.backward()
optimizer.step()
losses.append(loss.item())
# if (i+1) % 50 == 0:
# print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
# % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
# 2. validation
model.eval()
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = copy.deepcopy(model)
best_epoch = epoch
if epoch - best_epoch == 50:
break
logger.info(f'training finishes using {time.time() - t} seconds!')
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 6,887 | 30.167421 | 182 | py |
aidgn | aidgn-main/domainbed/command_launchers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A command launcher launches a list of commands on a cluster; implement your own
launcher to add support for your cluster. We've provided an example launcher
which runs all commands serially on the local machine.
"""
import subprocess
import time
import torch
def local_launcher(commands):
    """Run each command to completion, one at a time, in a local shell."""
    for command in commands:
        subprocess.call(command, shell=True)
def dummy_launcher(commands):
    """
    No-op launcher: echoes each command instead of executing it.
    Useful for testing.
    """
    for command in commands:
        print('Dummy launcher: {}'.format(command))
def multi_gpu_launcher(commands, gpu_offset=4):
    """
    Launch commands on the local machine, using all GPUs in parallel.

    The first `gpu_offset` GPU indices are skipped (previously this was a
    hard-coded 4); each remaining GPU runs at most one command at a time,
    pinned via CUDA_VISIBLE_DEVICES.

    Args:
        commands: list of shell command strings; consumed (popped) in order.
        gpu_offset: number of leading GPUs to leave untouched. The default
            of 4 preserves the original behavior.

    Raises:
        RuntimeError: if there are commands to run but no GPU past the
            offset is available. Previously this case spun forever in the
            polling loop without ever launching anything.
    """
    print('WARNING: using experimental multi_gpu_launcher.')
    n_gpus = torch.cuda.device_count() - gpu_offset
    if commands and n_gpus <= 0:
        raise RuntimeError(
            'multi_gpu_launcher: no usable GPUs beyond offset '
            f'{gpu_offset} (device_count={torch.cuda.device_count()})')
    procs_by_gpu = [None] * n_gpus
    while len(commands) > 0:
        for gpu_idx in range(n_gpus):
            proc = procs_by_gpu[gpu_idx]
            if (proc is None) or (proc.poll() is not None):
                # Nothing is running on this GPU; launch a command.
                cmd = commands.pop(0)
                new_proc = subprocess.Popen(
                    f'CUDA_VISIBLE_DEVICES={gpu_idx + gpu_offset} {cmd}',
                    shell=True)
                procs_by_gpu[gpu_idx] = new_proc
                break
        time.sleep(1)
    # Wait for the last few tasks to finish before returning
    for p in procs_by_gpu:
        if p is not None:
            p.wait()
# Map launcher names (as selected by callers) to launcher functions.
REGISTRY = {
    'local': local_launcher,
    'dummy': dummy_launcher,
    'multi_gpu': multi_gpu_launcher
}
# Facebook-internal cluster launchers register themselves when available;
# on the open-source release this import fails and is deliberately ignored.
try:
    from domainbed import facebook
    facebook.register_command_launchers(REGISTRY)
except ImportError:
    pass
| 1,792 | 27.919355 | 79 | py |
aidgn | aidgn-main/domainbed/networks.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models
import math
from domainbed.lib import wide_resnet
import copy
def remove_batch_norm_from_resnet(model):
    """Fold every BatchNorm of a torchvision ResNet into the preceding conv.

    Each (conv, bn) pair is merged via fuse_conv_bn_eval into a single conv
    with adjusted weights, and the bn is replaced by an Identity so the
    module attribute layout stays intact. Assumes blocks use the
    convN/bnN naming of torchvision's residual blocks.
    """
    fuse = torch.nn.utils.fusion.fuse_conv_bn_eval
    model.eval()  # fusion uses the BN running statistics, so switch to eval first
    # Stem: fuse conv1+bn1, then neutralize bn1.
    model.conv1 = fuse(model.conv1, model.bn1)
    model.bn1 = Identity()
    for name, module in model.named_modules():
        # "layer1".."layer4" are the residual stages (len("layerN") == 6
        # filters out their children such as "layer1.0.conv1").
        if name.startswith("layer") and len(name) == 6:
            for b, bottleneck in enumerate(module):
                for name2, module2 in bottleneck.named_modules():
                    if name2.startswith("conv"):
                        # Each convN has a matching bnN sibling on the block.
                        bn_name = "bn" + name2[-1]
                        setattr(bottleneck, name2,
                                fuse(module2, getattr(bottleneck, bn_name)))
                        setattr(bottleneck, bn_name, Identity())
                # Shortcut path is [conv, bn] when present; fuse those too.
                if isinstance(bottleneck.downsample, torch.nn.Sequential):
                    bottleneck.downsample[0] = fuse(bottleneck.downsample[0],
                                                    bottleneck.downsample[1])
                    bottleneck.downsample[1] = Identity()
    model.train()
    return model
class Identity(nn.Module):
    """Pass-through module: returns its input unchanged.

    Used to stub out layers (e.g. a removed fc head or a fused batchnorm)
    while keeping the surrounding module structure intact.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
class MLP(nn.Module):
    """A plain fully-connected network: input layer, (mlp_depth - 2) hidden
    layers, and a linear output head, with dropout then ReLU after every
    layer except the output."""

    def __init__(self, n_inputs, n_outputs, hparams):
        super().__init__()
        width = hparams['mlp_width']
        self.input = nn.Linear(n_inputs, width)
        self.dropout = nn.Dropout(hparams['mlp_dropout'])
        self.hiddens = nn.ModuleList(
            nn.Linear(width, width) for _ in range(hparams['mlp_depth'] - 2))
        self.output = nn.Linear(width, n_outputs)
        self.n_outputs = n_outputs

    def forward(self, x):
        x = F.relu(self.dropout(self.input(x)))
        for layer in self.hiddens:
            x = F.relu(self.dropout(layer(x)))
        return self.output(x)
class ResNet(torch.nn.Module):
    """ResNet with the softmax chopped off and the batchnorm frozen"""
    def __init__(self, input_shape, hparams):
        super(ResNet, self).__init__()
        # Pretrained ImageNet backbone; n_outputs is the penultimate width.
        if hparams['resnet18']:
            self.network = torchvision.models.resnet18(pretrained=True)
            self.n_outputs = 512
        else:
            self.network = torchvision.models.resnet50(pretrained=True)
            self.n_outputs = 2048
        # self.network = remove_batch_norm_from_resnet(self.network)
        # adapt number of channels: replace conv1 and recycle the pretrained
        # RGB filters cyclically (channel i gets the weights of channel i % 3)
        nc = input_shape[0]
        if nc != 3:
            tmp = self.network.conv1.weight.data.clone()
            self.network.conv1 = nn.Conv2d(
                nc, 64, kernel_size=(7, 7),
                stride=(2, 2), padding=(3, 3), bias=False)
            for i in range(nc):
                self.network.conv1.weight.data[:, i, :, :] = tmp[:, i % 3, :, :]
        # save memory
        del self.network.fc
        self.network.fc = Identity()
        self.freeze_bn()
        self.hparams = hparams
        self.dropout = nn.Dropout(hparams['resnet_dropout'])
    def forward(self, x):
        """Encode x into a feature vector of size n_outputs."""
        return self.dropout(self.network(x))
    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super().train(mode)
        self.freeze_bn()
    def freeze_bn(self):
        # Keep every BatchNorm in eval mode: running statistics are used
        # and never updated (the affine weights remain trainable).
        for m in self.network.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
class MNIST_CNN(nn.Module):
    """
    Hand-tuned architecture for MNIST.

    Four conv stages, each followed by ReLU and GroupNorm, then a global
    average pool to a 128-dim feature vector.

    Weirdness noticed so far with this architecture:
        - adding a linear layer after the mean-pool in features hurts
          RotatedMNIST-100 generalization severely.
    """
    n_outputs = 128

    def __init__(self, input_shape):
        super().__init__()
        self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.bn0 = nn.GroupNorm(8, 64)
        self.bn1 = nn.GroupNorm(8, 128)
        self.bn2 = nn.GroupNorm(8, 128)
        self.bn3 = nn.GroupNorm(8, 128)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        stages = ((self.conv1, self.bn0), (self.conv2, self.bn1),
                  (self.conv3, self.bn2), (self.conv4, self.bn3))
        for conv, norm in stages:
            x = norm(F.relu(conv(x)))
        x = self.avgpool(x)
        return x.view(len(x), -1)
class ContextNet(nn.Module):
    """Three 5x5 conv layers (stride 1, 'same' padding) producing a
    single-channel map with the input's spatial size."""

    def __init__(self, input_shape):
        super().__init__()
        pad = (5 - 1) // 2  # keep height and width unchanged
        self.context_net = nn.Sequential(
            nn.Conv2d(input_shape[0], 64, 5, padding=pad),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 5, padding=pad),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 1, 5, padding=pad),
        )

    def forward(self, x):
        return self.context_net(x)
def Featurizer(input_shape, hparams):
    """Auto-select an appropriate featurizer for the given input shape."""
    if len(input_shape) == 1:
        # Flat vectors -> MLP (its output width equals hparams["mlp_width"]).
        return MLP(input_shape[0], hparams["mlp_width"], hparams)
    elif input_shape[1:3] == (28, 28):
        # MNIST-sized images.
        return MNIST_CNN(input_shape)
    elif input_shape[1:3] == (32, 32):
        # CIFAR-sized images -> Wide ResNet (depth 16, widen factor 2, no dropout).
        return wide_resnet.Wide_ResNet(input_shape, 16, 2, 0.)
    elif input_shape[1:3] == (224, 224):
        # ImageNet-sized images -> pretrained ResNet backbone.
        return ResNet(input_shape, hparams)
    else:
        raise NotImplementedError
def Classifier(in_features, out_features, is_nonlinear=False):
    """Build a classification head.

    Linear by default; with is_nonlinear=True, a 3-layer MLP that halves
    then quarters the feature width before the output layer.
    """
    if not is_nonlinear:
        return torch.nn.Linear(in_features, out_features)
    return torch.nn.Sequential(
        torch.nn.Linear(in_features, in_features // 2),
        torch.nn.ReLU(),
        torch.nn.Linear(in_features // 2, in_features // 4),
        torch.nn.ReLU(),
        torch.nn.Linear(in_features // 4, out_features))
class WholeFish(nn.Module):
    # Featurizer + classifier bundled into one sequential network, with
    # optional loading of a full state dict at construction.
    def __init__(self, input_shape, num_classes, hparams, weights=None):
        super(WholeFish, self).__init__()
        featurizer = Featurizer(input_shape, hparams)
        classifier = Classifier(
            featurizer.n_outputs,
            num_classes,
            hparams['nonlinear_classifier'])
        self.net = nn.Sequential(
            featurizer, classifier
        )
        if weights is not None:
            self.load_state_dict(copy.deepcopy(weights))
    def reset_weights(self, weights):
        # Deep-copied so later in-place updates to `weights` don't leak in.
        self.load_state_dict(copy.deepcopy(weights))
    def forward(self, x):
        return self.net(x)
class Flatten(nn.Module):
    """Collapse every non-batch dimension into one."""

    def forward(self, x):
        return x.view(x.size(0), -1)
| 7,258 | 30.154506 | 80 | py |
aidgn | aidgn-main/domainbed/datasets.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
from PIL import Image, ImageFile
from torchvision import transforms
import torchvision.datasets.folder
from torch.utils.data import TensorDataset, Subset
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms.functional import rotate
from wilds.datasets.camelyon17_dataset import Camelyon17Dataset
from wilds.datasets.fmow_dataset import FMoWDataset
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Names of the selectable dataset classes; each entry must match a class
# defined in this module (resolved via get_dataset_class / globals()).
DATASETS = [
    # Debug
    "Debug28",
    "Debug224",
    # Small images
    "ColoredMNIST",
    "RotatedMNIST",
    # Big images
    "VLCS",
    "PACS",
    "OfficeHome",
    "TerraIncognita",
    "DomainNet",
    "SVIRO",
    # WILDS datasets
    "WILDSCamelyon",
    "WILDSFMoW"
]
def get_dataset_class(dataset_name):
    """Return the dataset class with the given name."""
    try:
        return globals()[dataset_name]
    except KeyError:
        raise NotImplementedError(
            "Dataset not found: {}".format(dataset_name))
def num_environments(dataset_name):
    # Number of environments declared by the named dataset class.
    return len(get_dataset_class(dataset_name).ENVIRONMENTS)
class MultipleDomainDataset:
    """Base class: an indexable sequence of per-environment datasets.

    Subclasses populate ``self.datasets`` and may override the class-level
    training constants below.
    """
    N_STEPS = 5001           # Default, subclasses may override
    CHECKPOINT_FREQ = 100    # Default, subclasses may override
    N_WORKERS = 8            # Default, subclasses may override
    ENVIRONMENTS = None      # Subclasses should override
    INPUT_SHAPE = None       # Subclasses should override

    def __getitem__(self, index):
        return self.datasets[index]

    def __len__(self):
        return len(self.datasets)
class Debug(MultipleDomainDataset):
    """Tiny random dataset (3 environments, 16 samples each) for smoke tests."""

    def __init__(self, root, test_envs, hparams):
        super().__init__()
        self.input_shape = self.INPUT_SHAPE
        self.num_classes = 2
        self.datasets = [
            TensorDataset(
                torch.randn(16, *self.INPUT_SHAPE),
                torch.randint(0, self.num_classes, (16,)),
            )
            for _ in range(3)
        ]
class Debug28(Debug):
    # 3-channel, 28x28 variant of the Debug dataset.
    INPUT_SHAPE = (3, 28, 28)
    ENVIRONMENTS = ['0', '1', '2']
class Debug224(Debug):
    # 3-channel, 224x224 variant of the Debug dataset.
    INPUT_SHAPE = (3, 224, 224)
    ENVIRONMENTS = ['0', '1', '2']
class MultipleEnvironmentMNIST(MultipleDomainDataset):
    """Split a shuffled MNIST (train + test pooled) round-robin across
    environments, applying ``dataset_transform`` per environment."""

    def __init__(self, root, environments, dataset_transform, input_shape,
                 num_classes):
        super().__init__()
        if root is None:
            raise ValueError('Data directory not specified!')

        # Pool the train and test splits into single tensors.
        mnist_train = MNIST(root, train=True, download=True)
        mnist_test = MNIST(root, train=False, download=True)
        images = torch.cat((mnist_train.data, mnist_test.data))
        labels = torch.cat((mnist_train.targets, mnist_test.targets))

        # One global shuffle so each environment gets an i.i.d. slice.
        perm = torch.randperm(len(images))
        images = images[perm]
        labels = labels[perm]

        self.datasets = []
        n_env = len(environments)
        for i, env in enumerate(environments):
            # Every n_env-th example, offset by the environment index.
            self.datasets.append(
                dataset_transform(images[i::n_env], labels[i::n_env], env))

        self.input_shape = input_shape
        self.num_classes = num_classes
class ColoredMNIST(MultipleEnvironmentMNIST):
    """Binary MNIST in which color correlates with the label by an
    environment-specific amount (the correlation flip probability is the
    environment parameter)."""

    ENVIRONMENTS = ['+90%', '+80%', '-90%']

    def __init__(self, root, test_envs, hparams):
        super(ColoredMNIST, self).__init__(root, [0.1, 0.2, 0.9],
                                           self.color_dataset, (2, 28, 28,), 2)
        self.input_shape = (2, 28, 28,)
        self.num_classes = 2

    def color_dataset(self, images, labels, environment):
        # Binarize the digit: 0-4 -> 1, 5-9 -> 0.
        labels = (labels < 5).float()
        # Corrupt the label with probability 0.25.
        labels = self.torch_xor_(labels,
                                 self.torch_bernoulli_(0.25, len(labels)))
        # Color follows the (possibly corrupted) label, flipped with
        # probability `environment`.
        colors = self.torch_xor_(labels,
                                 self.torch_bernoulli_(environment,
                                                       len(labels)))
        # Duplicate the grayscale image into two channels, then zero the
        # channel that does not match the assigned color.
        images = torch.stack([images, images], dim=1)
        images[torch.arange(len(images)), (1 - colors).long(), :, :] *= 0

        x = images.float().div_(255.0)
        y = labels.view(-1).long()
        return TensorDataset(x, y)

    def torch_bernoulli_(self, p, size):
        # Bernoulli(p) samples as a {0., 1.} float tensor.
        return (torch.rand(size) < p).float()

    def torch_xor_(self, a, b):
        # XOR for {0., 1.} float tensors.
        return (a - b).abs()
class RotatedMNIST(MultipleEnvironmentMNIST):
    """MNIST with each environment rotated by a fixed angle in degrees."""

    ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']

    def __init__(self, root, test_envs, hparams):
        super(RotatedMNIST, self).__init__(root, [0, 15, 30, 45, 60, 75],
                                           self.rotate_dataset, (1, 28, 28,), 10)

    def rotate_dataset(self, images, labels, angle):
        # Bilinear rotation on PIL images, back to a tensor afterwards.
        rotate_fn = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Lambda(lambda im: rotate(
                im, angle, fill=(0,),
                interpolation=torchvision.transforms.InterpolationMode.BILINEAR)),
            transforms.ToTensor()])

        x = torch.zeros(len(images), 1, 28, 28)
        for idx, image in enumerate(images):
            x[idx] = rotate_fn(image)
        return TensorDataset(x, labels.view(-1))
class EImageFolder(ImageFolder):
    """ImageFolder that additionally returns a fixed per-environment
    domain label with every sample."""

    def __init__(self, root, transform, domain_label):
        super(EImageFolder, self).__init__(root, transform)
        self.domain_label = domain_label

    def __getitem__(self, index):
        path, target = self.samples[index]
        image = self.transform(self.loader(path))
        return image, target, self.domain_label
class MultipleEnvironmentImageFolder(MultipleDomainDataset):
    """One EImageFolder per sub-directory of ``root``.

    Sub-directories (sorted by name) define the environments. Training
    environments receive consecutive domain labels starting at 0; held-out
    test environments receive domain label -1.
    """
    def __init__(self, root, test_envs, augment, hparams):
        super().__init__()
        environments = [f.name for f in os.scandir(root) if f.is_dir()]
        environments = sorted(environments)

        # Deterministic eval-style preprocessing (ImageNet normalization).
        transform = transforms.Compose([
            transforms.Resize((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        # Stochastic augmentation used only for training environments.
        augment_transform = transforms.Compose([
            # transforms.Resize((224,224)),
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        self.datasets = []
        domain_index = 0
        for i, environment in enumerate(environments):

            if augment and (i not in test_envs):
                env_transform = augment_transform
            else:
                env_transform = transform

            path = os.path.join(root, environment)
            # Training envs get 0..N-1 in sorted-directory order; test envs
            # are marked with -1.
            if i not in test_envs:
                env_dataset = EImageFolder(path, env_transform, domain_index)
                domain_index += 1
            else:
                env_dataset = EImageFolder(path, env_transform, -1)

            self.datasets.append(env_dataset)

        self.input_shape = (3, 224, 224,)
        # Assumes every environment directory has the same class set.
        self.num_classes = len(self.datasets[-1].classes)
class VLCS(MultipleEnvironmentImageFolder):
    # Environments are the four sub-directories of root/VLCS/.
    CHECKPOINT_FREQ = 50
    N_STEPS = 5000
    N_WORKERS = 6
    ENVIRONMENTS = ["C", "L", "S", "V"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "VLCS/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class PACS(MultipleEnvironmentImageFolder):
    # Environments are the four sub-directories of root/PACS/.
    CHECKPOINT_FREQ = 50
    N_STEPS = 5000
    N_WORKERS = 6
    ENVIRONMENTS = ["A", "C", "P", "S"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "PACS/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class DomainNet(MultipleEnvironmentImageFolder):
    # Larger benchmark: longer schedule and sparser checkpoints.
    N_STEPS = 15000
    CHECKPOINT_FREQ = 500
    N_WORKERS = 6
    ENVIRONMENTS = ["clip", "info", "paint", "quick", "real", "sketch"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "DomainNet/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class OfficeHome(MultipleEnvironmentImageFolder):
    # Environments are the four sub-directories of root/OfficeHome/.
    CHECKPOINT_FREQ = 100
    N_STEPS = 5001
    N_WORKERS = 6
    ENVIRONMENTS = ["A", "C", "P", "R"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "OfficeHome/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class TerraIncognita(MultipleEnvironmentImageFolder):
    # Environments are camera-trap locations under root/terra_incognita/.
    CHECKPOINT_FREQ = 100
    N_WORKERS = 6
    N_STEPS = 5001
    ENVIRONMENTS = ["L100", "L38", "L43", "L46"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "terra_incognita/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class SVIRO(MultipleEnvironmentImageFolder):
    # Environments are vehicle interiors under root/sviro/.
    CHECKPOINT_FREQ = 50
    ENVIRONMENTS = ["aclass", "escape", "hilux", "i3", "lexus", "tesla", "tiguan", "tucson", "x5", "zoe"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "sviro/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class WILDSEnvironment:
    """View of a WILDS dataset restricted to rows whose ``metadata_name``
    column equals ``metadata_value``."""

    def __init__(
            self,
            wilds_dataset,
            metadata_name,
            metadata_value,
            transform=None):
        self.name = metadata_name + "_" + str(metadata_value)

        column = wilds_dataset.metadata_fields.index(metadata_name)
        mask = wilds_dataset.metadata_array[:, column] == metadata_value
        self.indices = torch.where(mask)[0]

        self.dataset = wilds_dataset
        self.transform = transform

    def __getitem__(self, i):
        x = self.dataset.get_input(self.indices[i])
        # Some WILDS datasets return arrays rather than PIL images.
        if type(x).__name__ != "Image":
            x = Image.fromarray(x)

        y = self.dataset.y_array[self.indices[i]]
        if self.transform is not None:
            x = self.transform(x)
        return x, y

    def __len__(self):
        return len(self.indices)
class WILDSDataset(MultipleDomainDataset):
    """Wrap a WILDS dataset as one WILDSEnvironment per distinct value of
    ``metadata_name``."""
    INPUT_SHAPE = (3, 224, 224)
    def __init__(self, dataset, metadata_name, test_envs, augment, hparams):
        super().__init__()
        # Deterministic eval-style preprocessing (ImageNet normalization).
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        # Stochastic augmentation used only for training environments.
        augment_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        self.datasets = []

        for i, metadata_value in enumerate(
                self.metadata_values(dataset, metadata_name)):
            if augment and (i not in test_envs):
                env_transform = augment_transform
            else:
                env_transform = transform

            env_dataset = WILDSEnvironment(
                dataset, metadata_name, metadata_value, env_transform)

            self.datasets.append(env_dataset)

        self.input_shape = (3, 224, 224,)
        self.num_classes = dataset.n_classes

    def metadata_values(self, wilds_dataset, metadata_name):
        # Sorted distinct values of the chosen metadata column; their order
        # fixes the environment indexing.
        metadata_index = wilds_dataset.metadata_fields.index(metadata_name)
        metadata_vals = wilds_dataset.metadata_array[:, metadata_index]
        return sorted(list(set(metadata_vals.view(-1).tolist())))
class WILDSCamelyon(WILDSDataset):
    # Camelyon17 split by hospital.
    ENVIRONMENTS = [ "hospital_0", "hospital_1", "hospital_2", "hospital_3",
            "hospital_4"]
    def __init__(self, root, test_envs, hparams):
        dataset = Camelyon17Dataset(root_dir=root)
        super().__init__(
            dataset, "hospital", test_envs, hparams['data_augmentation'], hparams)
class WILDSFMoW(WILDSDataset):
    # FMoW split by geographic region.
    ENVIRONMENTS = [ "region_0", "region_1", "region_2", "region_3",
            "region_4", "region_5"]
    def __init__(self, root, test_envs, hparams):
        dataset = FMoWDataset(root_dir=root)
        super().__init__(
            dataset, "region", test_envs, hparams['data_augmentation'], hparams)
| 13,608 | 33.805627 | 105 | py |
aidgn | aidgn-main/domainbed/algorithms.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
import math
import copy
import numpy as np
from collections import defaultdict
from domainbed import networks
from domainbed.lib.misc import random_pairs_of_minibatches, ParamDict
# Names of the selectable algorithm classes; each entry is resolved via
# get_algorithm_class / globals().
ALGORITHMS = [
    'ERM',
    'AIDGN',
    'Fish',
    'IRM',
    'GroupDRO',
    'Mixup',
    'MLDG',
    'CORAL',
    'MMD',
    'DANN',
    'CDANN',
    'MTL',
    'SagNet',
    'ARM',
    'VREx',
    'RSC',
    'SD',
    'ANDMask',
    'IGA',
    'SelfReg',
    'MDSDI'
]
def get_algorithm_class(algorithm_name):
    """Return the algorithm class with the given name."""
    try:
        return globals()[algorithm_name]
    except KeyError:
        raise NotImplementedError(
            "Algorithm not found: {}".format(algorithm_name))
class Algorithm(torch.nn.Module):
    """
    A subclass of Algorithm implements a domain generalization algorithm.
    Subclasses should implement the following:
    - update()
    - predict()

    NOTE(review): update() here takes ``step`` first, matching ERM/AIDGN in
    this file, but several other subclasses below (Fish, DANN, IRM, ...)
    override it as update(minibatches, unlabeled) -- confirm which
    signature the trainer calls before mixing algorithms.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(Algorithm, self).__init__()
        self.hparams = hparams

    def update(self, step, minibatches, unlabeled=None):
        """
        Perform one update step, given a list of (x, y) tuples for all
        environments.

        Admits an optional list of unlabeled minibatches from the test domains,
        when task is domain_adaptation.
        """
        raise NotImplementedError

    def predict(self, x):
        # Forward pass producing logits; must be overridden.
        raise NotImplementedError

    def magnitude(self, x):
        # Per-sample feature magnitude (used by AIDGN); optional override.
        raise NotImplementedError
class ERM(Algorithm):
    """
    Empirical Risk Minimization (ERM)
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(ERM, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        self.classifier = networks.Classifier(
            self.featurizer.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])

        self.network = nn.Sequential(self.featurizer, self.classifier)
        self.optimizer = torch.optim.Adam(
            self.network.parameters(),
            lr=self.hparams['lr'],
            weight_decay=self.hparams['weight_decay']
        )

    def update(self, step, minibatches, unlabeled=None):
        # Minibatches carry (x, y, domain) triples; ERM ignores the domain.
        xs, ys, _ = zip(*minibatches)
        all_x = torch.cat(xs)
        all_y = torch.cat(ys)
        loss = F.cross_entropy(self.predict(all_x), all_y)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item()}

    def predict(self, x):
        return self.network(x)
class AIClassifier(torch.nn.Module):
    """Additive-angular-margin classifier head used by AIDGN.

    Logits are scaled cosine similarities between L2-normalized features and
    per-class weight columns; during training the target-class angle is
    enlarged by a perturbation ``pb`` that grows with the (clamped) feature
    norm and the sample's domain-mean norm. ``forward`` also returns a
    per-sample norm regularizer (``loss_kl``).
    """

    def __init__(self, feat_dim, num_class, kappa=110, l_a=10, u_a=410,
                 gamma=0.001, beta=0.3, eta=15):
        super(AIClassifier, self).__init__()
        self.weight = torch.nn.Parameter(torch.Tensor(feat_dim, num_class))
        # Random init, renormalized column-wise.
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.scale = kappa   # logit scale applied to the cosine outputs
        self.l_a = l_a       # lower clamp for feature norms
        self.u_a = u_a       # upper clamp for feature norms
        self.gamma = gamma   # slope of the margin in the feature norm
        self.beta = beta     # weight of the domain-mean term in the margin
        self.eta = eta       # weight of the norm regularizer

    def calc_perturb(self, x, domain_mean):
        """Angular margin (radians) from the clamped norm ``x`` and the
        sample's domain-mean magnitude."""
        pb = self.gamma * (x - self.l_a + self.beta * domain_mean) + 0.4
        return pb

    def predict(self, feats):
        """Margin-free cosine logits, for evaluation."""
        weight_norm = F.normalize(self.weight, dim=0)
        feats_norm = F.normalize(feats)
        cos_theta = torch.mm(feats_norm, weight_norm)
        return cos_theta

    def forward(self, feats, labels, ratio_h, domain_mean):
        x_norm = torch.norm(feats, dim=1, keepdim=True).clamp(self.l_a, self.u_a)
        pb = self.calc_perturb(x_norm, domain_mean)
        cos_m, sin_m = torch.cos(pb), torch.sin(pb)
        # Norm regularizer: penalizes both very large and very small norms.
        loss_kl = 1 / ((ratio_h * self.u_a) ** 2) * x_norm + 1 / (x_norm)

        weight_norm = F.normalize(self.weight, dim=0)
        feats = F.normalize(feats)
        cos_theta = torch.mm(feats, weight_norm)
        cos_theta = cos_theta.clamp(-1, 1)
        # FIX: clamp the radicand at 0 -- floating-point error can push
        # 1 - cos^2 slightly negative when cos_theta is exactly +/-1, which
        # previously produced NaN logits/gradients.
        sin_theta = torch.sqrt((1.0 - torch.pow(cos_theta, 2)).clamp(min=0))
        # cos(theta + pb) via the angle-addition formula.
        cos_theta_m = cos_theta * cos_m - sin_theta * sin_m
        # Only apply the margin while theta + pb stays below pi.
        min_cos_theta = torch.cos(math.pi - pb)
        cos_theta_m = torch.where(cos_theta > min_cos_theta, cos_theta_m, cos_theta)

        # Substitute the margin logit at the target class only.
        index = torch.zeros_like(cos_theta)
        index.scatter_(1, labels.data.view(-1, 1), 1)
        index = index.bool()
        output = cos_theta * 1.0
        output[index] = cos_theta_m[index]
        output *= self.scale
        return output, self.eta * loss_kl
class AIDGN(Algorithm):
    """ERM-style training with the AIClassifier margin head, driven by a
    per-domain running mean of feature magnitudes."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(AIDGN, self).__init__(input_shape, num_classes, num_domains, hparams)
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        self.classifier = AIClassifier(
            self.featurizer.n_outputs,
            num_classes,
            self.hparams['kappa'],
            self.hparams['l_a'],
            self.hparams['u_a'],
            self.hparams['gamma'],
            self.hparams['beta'],
            self.hparams['eta'],
        )
        self.optimizer = torch.optim.Adam(
            list(self.featurizer.parameters()) + list(self.classifier.parameters()),
            lr=self.hparams['lr'],
            weight_decay=self.hparams['weight_decay'],
        )
        # Halve the learning rate at 6k and 12k steps.
        self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self.optimizer, milestones=[6000, 12000], gamma=0.5)

        self.num_domains = num_domains
        # Per-domain mean feature magnitude; initialized large so early
        # ratios fall below the threshold.  NOTE(review): assumes CUDA.
        self.mean_mags = 10000 * torch.ones(num_domains).cuda()
        self.batch_size = self.hparams['batch_size']

    def predict(self, x):
        return self.classifier.predict(self.featurizer(x))

    def magnitude(self, x):
        # L2 norm of each sample's feature vector.
        return torch.norm(self.featurizer(x), dim=1)

    def update(self, step, minibatches, unlabeled=None):
        all_x = torch.cat([x for x, y, z in minibatches])
        all_y = torch.cat([y for x, y, z in minibatches])
        all_z = torch.cat([z for x, y, z in minibatches])

        feats = self.featurizer(all_x)
        mags = torch.norm(feats, dim=1)

        # Refresh the per-domain means; the batch is laid out as contiguous
        # per-domain chunks of size batch_size.
        for d in range(self.num_domains):
            chunk = mags[d * self.batch_size: (d + 1) * self.batch_size]
            self.mean_mags[d] = torch.mean(chunk, dim=0)

        with torch.no_grad():
            domain_mean = self.mean_mags[all_z]
        ratio = mags.detach() / domain_mean
        # Only ratios above the threshold contribute; the rest become 1.
        ratio_h = torch.where(ratio > self.hparams['ratiothreshold'],
                              ratio, torch.ones_like(ratio))

        domain_mean = domain_mean.reshape(domain_mean.size(0), 1).detach()
        logits, loss_kl = self.classifier.forward(feats, all_y, ratio_h, domain_mean)
        loss_kl = torch.mean(loss_kl)
        loss_c = F.cross_entropy(logits, all_y)
        loss = loss_c + loss_kl

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item(), 'loss_c': loss_c.item(), 'loss_kl': loss_kl.item()}
class Fish(Algorithm):
    """
    Implementation of Fish, as seen in Gradient Matching for Domain
    Generalization, Shi et al. 2021.

    NOTE(review): update() here takes (minibatches, unlabeled) while
    ERM/AIDGN in this file use (step, minibatches, unlabeled), and the inner
    loop unpacks 2-tuples while this repo's EImageFolder yields (x, y,
    domain) triples -- confirm Fish is wired to a compatible trainer/loader
    before use.
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(Fish, self).__init__(input_shape, num_classes, num_domains,
                                   hparams)
        self.input_shape = input_shape
        self.num_classes = num_classes

        self.network = networks.WholeFish(input_shape, num_classes, hparams)
        self.optimizer = torch.optim.Adam(
            self.network.parameters(),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        # Inner optimizer state is carried across update() calls.
        self.optimizer_inner_state = None

    def create_clone(self, device):
        # Fresh clone of the meta-network for the inner-loop updates.
        self.network_inner = networks.WholeFish(self.input_shape, self.num_classes, self.hparams,
                                                weights=self.network.state_dict()).to(device)
        self.optimizer_inner = torch.optim.Adam(
            self.network_inner.parameters(),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        if self.optimizer_inner_state is not None:
            self.optimizer_inner.load_state_dict(self.optimizer_inner_state)

    def fish(self, meta_weights, inner_weights, lr_meta):
        # Move meta-weights a fraction lr_meta toward the inner weights
        # (Reptile-style interpolation).
        meta_weights = ParamDict(meta_weights)
        inner_weights = ParamDict(inner_weights)
        meta_weights += lr_meta * (inner_weights - meta_weights)
        return meta_weights

    def update(self, minibatches, unlabeled=None):
        self.create_clone(minibatches[0][0].device)

        # Sequential inner-loop SGD over the domain minibatches.
        for x, y in minibatches:
            loss = F.cross_entropy(self.network_inner(x), y)
            self.optimizer_inner.zero_grad()
            loss.backward()
            self.optimizer_inner.step()

        self.optimizer_inner_state = self.optimizer_inner.state_dict()
        meta_weights = self.fish(
            meta_weights=self.network.state_dict(),
            inner_weights=self.network_inner.state_dict(),
            lr_meta=self.hparams["meta_lr"]
        )
        self.network.reset_weights(meta_weights)

        # Reported loss is the last inner-loop minibatch's loss.
        return {'loss': loss.item()}

    def predict(self, x):
        return self.network(x)
class ARM(ERM):
    """ Adaptive Risk Minimization (ARM) """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        original_input_shape = input_shape
        # The context net's single-channel output is concatenated onto x,
        # so the ERM backbone sees one extra input channel.
        input_shape = (1 + original_input_shape[0],) + original_input_shape[1:]
        super(ARM, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        self.context_net = networks.ContextNet(original_input_shape)
        self.support_size = hparams['batch_size']

    def predict(self, x):
        batch_size, c, h, w = x.shape
        # Split the batch into support groups when it divides evenly,
        # otherwise treat the whole batch as a single group.
        if batch_size % self.support_size:
            meta_batch_size, support_size = 1, batch_size
        else:
            meta_batch_size = batch_size // self.support_size
            support_size = self.support_size

        context = self.context_net(x)
        # Average the context over each support group, then broadcast the
        # group context back to every member.
        context = context.reshape((meta_batch_size, support_size, 1, h, w))
        context = context.mean(dim=1)
        context = torch.repeat_interleave(context, repeats=support_size, dim=0)

        return self.network(torch.cat([x, context], dim=1))
class AbstractDANN(Algorithm):
    """Domain-Adversarial Neural Networks (abstract class)

    Alternates between discriminator steps (classify the domain from
    features) and generator steps (fool the discriminator while fitting the
    label), d_steps_per_g_step to 1.
    """

    def __init__(self, input_shape, num_classes, num_domains,
                 hparams, conditional, class_balance):

        super(AbstractDANN, self).__init__(input_shape, num_classes, num_domains,
                                           hparams)

        self.register_buffer('update_count', torch.tensor([0]))
        self.conditional = conditional        # condition disc on class embedding
        self.class_balance = class_balance    # reweight disc loss by class freq
        # Algorithms
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        self.classifier = networks.Classifier(
            self.featurizer.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])
        self.discriminator = networks.MLP(self.featurizer.n_outputs,
            num_domains, self.hparams)
        self.class_embeddings = nn.Embedding(num_classes,
            self.featurizer.n_outputs)

        # Optimizers: discriminator (+ embeddings) and generator (featurizer
        # + classifier) are trained with separate Adam instances.
        self.disc_opt = torch.optim.Adam(
            (list(self.discriminator.parameters()) +
                list(self.class_embeddings.parameters())),
            lr=self.hparams["lr_d"],
            weight_decay=self.hparams['weight_decay_d'],
            betas=(self.hparams['beta1'], 0.9))

        self.gen_opt = torch.optim.Adam(
            (list(self.featurizer.parameters()) +
                list(self.classifier.parameters())),
            lr=self.hparams["lr_g"],
            weight_decay=self.hparams['weight_decay_g'],
            betas=(self.hparams['beta1'], 0.9))

    def update(self, minibatches, unlabeled=None):
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"
        self.update_count += 1
        all_x = torch.cat([x for x, y in minibatches])
        all_y = torch.cat([y for x, y in minibatches])
        all_z = self.featurizer(all_x)
        if self.conditional:
            disc_input = all_z + self.class_embeddings(all_y)
        else:
            disc_input = all_z
        disc_out = self.discriminator(disc_input)
        # Domain label = position of the minibatch in the list.
        disc_labels = torch.cat([
            torch.full((x.shape[0], ), i, dtype=torch.int64, device=device)
            for i, (x, y) in enumerate(minibatches)
        ])

        if self.class_balance:
            # Weight each sample inversely to its class frequency.
            y_counts = F.one_hot(all_y).sum(dim=0)
            weights = 1. / (y_counts[all_y] * y_counts.shape[0]).float()
            disc_loss = F.cross_entropy(disc_out, disc_labels, reduction='none')
            disc_loss = (weights * disc_loss).sum()
        else:
            disc_loss = F.cross_entropy(disc_out, disc_labels)

        # Gradient penalty on the discriminator input.
        disc_softmax = F.softmax(disc_out, dim=1)
        input_grad = autograd.grad(disc_softmax[:, disc_labels].sum(),
            [disc_input], create_graph=True)[0]
        grad_penalty = (input_grad**2).sum(dim=1).mean(dim=0)
        disc_loss += self.hparams['grad_penalty'] * grad_penalty

        d_steps_per_g = self.hparams['d_steps_per_g_step']
        if (self.update_count.item() % (1+d_steps_per_g) < d_steps_per_g):
            # Discriminator step.
            self.disc_opt.zero_grad()
            disc_loss.backward()
            self.disc_opt.step()
            return {'disc_loss': disc_loss.item()}
        else:
            # Generator step: classification loss minus the disc loss.
            all_preds = self.classifier(all_z)
            classifier_loss = F.cross_entropy(all_preds, all_y)
            gen_loss = (classifier_loss +
                        (self.hparams['lambda'] * -disc_loss))
            self.disc_opt.zero_grad()
            self.gen_opt.zero_grad()
            gen_loss.backward()
            self.gen_opt.step()
            return {'gen_loss': gen_loss.item()}

    def predict(self, x):
        return self.classifier(self.featurizer(x))
class DANN(AbstractDANN):
    """Unconditional DANN"""
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(DANN, self).__init__(input_shape, num_classes, num_domains,
            hparams, conditional=False, class_balance=False)
class CDANN(AbstractDANN):
    """Conditional DANN"""
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(CDANN, self).__init__(input_shape, num_classes, num_domains,
            hparams, conditional=True, class_balance=True)
class IRM(ERM):
    """Invariant Risk Minimization"""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(IRM, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        self.register_buffer('update_count', torch.tensor([0]))

    @staticmethod
    def _irm_penalty(logits, y):
        # Per-environment penalty: product of the gradients (w.r.t. a dummy
        # scale) of the losses on the two halves of the minibatch.
        device = "cuda" if logits[0][0].is_cuda else "cpu"
        scale = torch.tensor(1.).to(device).requires_grad_()
        loss_1 = F.cross_entropy(logits[::2] * scale, y[::2])
        loss_2 = F.cross_entropy(logits[1::2] * scale, y[1::2])
        grad_1 = autograd.grad(loss_1, [scale], create_graph=True)[0]
        grad_2 = autograd.grad(loss_2, [scale], create_graph=True)[0]
        result = torch.sum(grad_1 * grad_2)
        return result

    def update(self, minibatches, unlabeled=None):
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"
        # Penalty weight stays 1.0 until the anneal step, then jumps.
        penalty_weight = (self.hparams['irm_lambda'] if self.update_count
                          >= self.hparams['irm_penalty_anneal_iters'] else
                          1.0)
        nll = 0.
        penalty = 0.

        all_x = torch.cat([x for x,y in minibatches])
        all_logits = self.network(all_x)
        all_logits_idx = 0
        for i, (x, y) in enumerate(minibatches):
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]
            nll += F.cross_entropy(logits, y)
            penalty += self._irm_penalty(logits, y)
        nll /= len(minibatches)
        penalty /= len(minibatches)
        loss = nll + (penalty_weight * penalty)

        if self.update_count == self.hparams['irm_penalty_anneal_iters']:
            # Reset Adam, because it doesn't like the sharp jump in gradient
            # magnitudes that happens at this step.
            self.optimizer = torch.optim.Adam(
                self.network.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay'])

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        self.update_count += 1
        return {'loss': loss.item(), 'nll': nll.item(),
                'penalty': penalty.item()}
class VREx(ERM):
    """V-REx algorithm from http://arxiv.org/abs/2003.00688

    Penalizes the variance of per-environment risks.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(VREx, self).__init__(input_shape, num_classes, num_domains,
                                   hparams)
        self.register_buffer('update_count', torch.tensor([0]))

    def update(self, minibatches, unlabeled=None):
        if self.update_count >= self.hparams["vrex_penalty_anneal_iters"]:
            penalty_weight = self.hparams["vrex_lambda"]
        else:
            penalty_weight = 1.0

        nll = 0.

        all_x = torch.cat([x for x, y in minibatches])
        all_logits = self.network(all_x)
        all_logits_idx = 0
        losses = torch.zeros(len(minibatches))
        for i, (x, y) in enumerate(minibatches):
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]
            # NOTE: nll is overwritten each iteration, so the reported
            # 'nll' below is the LAST environment's loss only.
            nll = F.cross_entropy(logits, y)
            losses[i] = nll

        # Mean risk plus the variance of risks across environments.
        mean = losses.mean()
        penalty = ((losses - mean) ** 2).mean()
        loss = mean + penalty_weight * penalty

        if self.update_count == self.hparams['vrex_penalty_anneal_iters']:
            # Reset Adam (like IRM), because it doesn't like the sharp jump in
            # gradient magnitudes that happens at this step.
            self.optimizer = torch.optim.Adam(
                self.network.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay'])

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        self.update_count += 1
        return {'loss': loss.item(), 'nll': nll.item(),
                'penalty': penalty.item()}
class Mixup(ERM):
    """
    Mixup of minibatches from different domains
    https://arxiv.org/pdf/2001.00677.pdf
    https://arxiv.org/pdf/1912.01805.pdf
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(Mixup, self).__init__(input_shape, num_classes, num_domains,
                                    hparams)

    def update(self, minibatches, unlabeled=None):
        objective = 0

        for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
            # Interpolation weight drawn from Beta(alpha, alpha).
            lam = np.random.beta(self.hparams["mixup_alpha"],
                                 self.hparams["mixup_alpha"])

            mixed = lam * xi + (1 - lam) * xj
            preds = self.predict(mixed)

            # Convex combination of the two label losses.
            objective += lam * F.cross_entropy(preds, yi)
            objective += (1 - lam) * F.cross_entropy(preds, yj)

        objective /= len(minibatches)

        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()

        return {'loss': objective.item()}
class GroupDRO(ERM):
    """
    Robust ERM minimizes the error at the worst minibatch
    Algorithm 1 from [https://arxiv.org/pdf/1911.08731.pdf]
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(GroupDRO, self).__init__(input_shape, num_classes, num_domains,
                                       hparams)
        # Per-group weights q; lazily initialized on the first update.
        self.register_buffer("q", torch.Tensor())

    def update(self, minibatches, unlabeled=None):
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"

        if not len(self.q):
            self.q = torch.ones(len(minibatches)).to(device)

        losses = torch.zeros(len(minibatches)).to(device)

        for m in range(len(minibatches)):
            x, y = minibatches[m]
            losses[m] = F.cross_entropy(self.predict(x), y)
            # Exponentiated-gradient ascent on the group weights.
            self.q[m] *= (self.hparams["groupdro_eta"] * losses[m].data).exp()

        self.q /= self.q.sum()

        # Weighted (worst-case-leaning) average of group losses.
        loss = torch.dot(losses, self.q)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item()}
class MLDG(ERM):
    """
    Model-Agnostic Meta-Learning
    Algorithm 1 / Equation (3) from: https://arxiv.org/pdf/1710.03463.pdf
    Related: https://arxiv.org/pdf/1703.03400.pdf
    Related: https://arxiv.org/pdf/1910.13580.pdf
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(MLDG, self).__init__(input_shape, num_classes, num_domains,
                                   hparams)

    def update(self, minibatches, unlabeled=None):
        """
        Terms being computed:
            * Li = Loss(xi, yi, params)
            * Gi = Grad(Li, params)

            * Lj = Loss(xj, yj, Optimizer(params, grad(Li, params)))
            * Gj = Grad(Lj, params)

            * params = Optimizer(params, Grad(Li + beta * Lj, params))
            *        = Optimizer(params, Gi + beta * Gj)

        That is, when calling .step(), we want grads to be Gi + beta * Gj

        For computational efficiency, we do not compute second derivatives.
        """
        num_mb = len(minibatches)
        objective = 0

        self.optimizer.zero_grad()
        # Ensure every parameter has a .grad tensor to accumulate into.
        for p in self.network.parameters():
            if p.grad is None:
                p.grad = torch.zeros_like(p)

        for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
            # fine tune clone-network on task "i"
            inner_net = copy.deepcopy(self.network)

            inner_opt = torch.optim.Adam(
                inner_net.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay']
            )

            inner_obj = F.cross_entropy(inner_net(xi), yi)

            inner_opt.zero_grad()
            inner_obj.backward()
            inner_opt.step()

            # The network has now accumulated gradients Gi
            # The clone-network has now parameters P - lr * Gi
            for p_tgt, p_src in zip(self.network.parameters(),
                                    inner_net.parameters()):
                if p_src.grad is not None:
                    p_tgt.grad.data.add_(p_src.grad.data / num_mb)

            # `objective` is populated for reporting purposes
            objective += inner_obj.item()

            # this computes Gj on the clone-network
            loss_inner_j = F.cross_entropy(inner_net(xj), yj)
            grad_inner_j = autograd.grad(loss_inner_j, inner_net.parameters(),
                allow_unused=True)

            # `objective` is populated for reporting purposes
            objective += (self.hparams['mldg_beta'] * loss_inner_j).item()

            for p, g_j in zip(self.network.parameters(), grad_inner_j):
                if g_j is not None:
                    p.grad.data.add_(
                        self.hparams['mldg_beta'] * g_j.data / num_mb)

            # The network has now accumulated gradients Gi + beta * Gj
            # Repeat for all train-test splits, do .step()

        objective /= len(minibatches)

        self.optimizer.step()

        return {'loss': objective}

    # This commented "update" method back-propagates through the gradients of
    # the inner update, as suggested in the original MAML paper.  However, this
    # is twice as expensive as the uncommented "update" method, which does not
    # compute second-order derivatives, implementing the First-Order MAML
    # method (FOMAML) described in the original MAML paper.

    # def update(self, minibatches, unlabeled=None):
    #     objective = 0
    #     beta = self.hparams["beta"]
    #     inner_iterations = self.hparams["inner_iterations"]

    #     self.optimizer.zero_grad()

    #     with higher.innerloop_ctx(self.network, self.optimizer,
    #             copy_initial_weights=False) as (inner_network, inner_optimizer):
    #         for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
    #             for inner_iteration in range(inner_iterations):
    #                 li = F.cross_entropy(inner_network(xi), yi)
    #                 inner_optimizer.step(li)
    #
    #             objective += F.cross_entropy(self.network(xi), yi)
    #             objective += beta * F.cross_entropy(inner_network(xj), yj)
    #
    #         objective /= len(minibatches)
    #         objective.backward()
    #
    #     self.optimizer.step()
    #
    #     return objective
class AbstractMMD(ERM):
    """
    Perform ERM while matching the pair-wise domain feature distributions
    using MMD (abstract class)
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams, gaussian):
        super(AbstractMMD, self).__init__(input_shape, num_classes, num_domains,
                                          hparams)
        if gaussian:
            self.kernel_type = "gaussian"
        else:
            self.kernel_type = "mean_cov"

    def my_cdist(self, x1, x2):
        # Squared Euclidean distances between rows of x1 and x2, via
        # |x1|^2 + |x2|^2 - 2 x1.x2^T, clamped away from zero.
        x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
        x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
        res = torch.addmm(x2_norm.transpose(-2, -1),
                          x1,
                          x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
        return res.clamp_min_(1e-30)

    def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100,
                                           1000]):
        # Sum of RBF kernels over a fixed bandwidth grid.
        # NOTE(review): mutable default list -- shared across calls, but
        # never mutated here, so it is harmless.
        D = self.my_cdist(x, y)
        K = torch.zeros_like(D)

        for g in gamma:
            K.add_(torch.exp(D.mul(-g)))

        return K

    def mmd(self, x, y):
        if self.kernel_type == "gaussian":
            Kxx = self.gaussian_kernel(x, x).mean()
            Kyy = self.gaussian_kernel(y, y).mean()
            Kxy = self.gaussian_kernel(x, y).mean()
            return Kxx + Kyy - 2 * Kxy
        else:
            # CORAL-style penalty: squared differences of feature means
            # and covariances.
            mean_x = x.mean(0, keepdim=True)
            mean_y = y.mean(0, keepdim=True)
            cent_x = x - mean_x
            cent_y = y - mean_y
            cova_x = (cent_x.t() @ cent_x) / (len(x) - 1)
            cova_y = (cent_y.t() @ cent_y) / (len(y) - 1)

            mean_diff = (mean_x - mean_y).pow(2).mean()
            cova_diff = (cova_x - cova_y).pow(2).mean()

            return mean_diff + cova_diff

    def update(self, minibatches, unlabeled=None):
        objective = 0
        penalty = 0
        nmb = len(minibatches)

        features = [self.featurizer(xi) for xi, _ in minibatches]
        classifs = [self.classifier(fi) for fi in features]
        targets = [yi for _, yi in minibatches]

        # Classification loss per domain plus MMD penalty per domain pair.
        for i in range(nmb):
            objective += F.cross_entropy(classifs[i], targets[i])
            for j in range(i + 1, nmb):
                penalty += self.mmd(features[i], features[j])

        objective /= nmb
        if nmb > 1:
            penalty /= (nmb * (nmb - 1) / 2)

        self.optimizer.zero_grad()
        (objective + (self.hparams['mmd_gamma']*penalty)).backward()
        self.optimizer.step()

        if torch.is_tensor(penalty):
            penalty = penalty.item()

        return {'loss': objective.item(), 'penalty': penalty}
class MMD(AbstractMMD):
    """AbstractMMD specialized to the multi-bandwidth Gaussian-kernel MMD."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super().__init__(input_shape, num_classes, num_domains, hparams,
                         gaussian=True)
class CORAL(AbstractMMD):
    """AbstractMMD specialized to mean/covariance (moment) matching."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super().__init__(input_shape, num_classes, num_domains, hparams,
                         gaussian=False)
class MTL(Algorithm):
    """
    A neural network version of
    Domain Generalization by Marginal Transfer Learning
    (https://arxiv.org/abs/1711.07910)

    Classifies on [features, domain_embedding] pairs, where the domain
    embedding is an exponential moving average of per-domain mean features.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(MTL, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        # The classifier sees features concatenated with the domain embedding,
        # hence the doubled input width.
        self.classifier = networks.Classifier(
            self.featurizer.n_outputs * 2,
            num_classes,
            self.hparams['nonlinear_classifier'])
        self.optimizer = torch.optim.Adam(
            list(self.featurizer.parameters()) +\
            list(self.classifier.parameters()),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        # Running (EMA) mean-feature embedding, one row per training domain.
        self.register_buffer('embeddings',
                             torch.zeros(num_domains,
                                         self.featurizer.n_outputs))
        self.ema = self.hparams['mtl_ema']

    def update(self, minibatches, unlabeled=None):
        """One step: sum of per-environment CE losses through predict()."""
        loss = 0
        for env, (x, y) in enumerate(minibatches):
            loss += F.cross_entropy(self.predict(x, env), y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return {'loss': loss.item()}

    def update_embeddings_(self, features, env=None):
        """Return the domain embedding, tiled to one row per example.

        When ``env`` is given (training), EMA-blend the batch mean with the
        stored per-domain embedding and persist it; otherwise (test-time)
        use the batch mean alone.
        """
        return_embedding = features.mean(0)
        if env is not None:
            return_embedding = self.ema * return_embedding +\
                               (1 - self.ema) * self.embeddings[env]
            self.embeddings[env] = return_embedding.clone().detach()
        return return_embedding.view(1, -1).repeat(len(features), 1)

    def predict(self, x, env=None):
        features = self.featurizer(x)
        # BUG FIX: the original called .normal_() on this tensor, which
        # overwrote the computed marginal embedding in-place with N(0, 1)
        # noise, so the classifier's second input was pure noise.
        embedding = self.update_embeddings_(features, env)
        return self.classifier(torch.cat((features, embedding), 1))
class SagNet(Algorithm):
    """
    Style Agnostic Network
    Algorithm 1 from: https://arxiv.org/abs/1910.11645

    Three sub-networks trained with three separate optimizers:
    a shared featurizer (network_f), a content classifier (network_c)
    trained on style-randomized features, and a style classifier
    (network_s) trained on content-randomized features. The featurizer is
    additionally trained adversarially against the style classifier.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(SagNet, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        # featurizer network
        self.network_f = networks.Featurizer(input_shape, self.hparams)
        # content network
        self.network_c = networks.Classifier(
            self.network_f.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])
        # style network
        self.network_s = networks.Classifier(
            self.network_f.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])
        # # This commented block of code implements something closer to the
        # # original paper, but is specific to ResNet and puts in disadvantage
        # # the other algorithms.
        # resnet_c = networks.Featurizer(input_shape, self.hparams)
        # resnet_s = networks.Featurizer(input_shape, self.hparams)
        # # featurizer network
        # self.network_f = torch.nn.Sequential(
        #         resnet_c.network.conv1,
        #         resnet_c.network.bn1,
        #         resnet_c.network.relu,
        #         resnet_c.network.maxpool,
        #         resnet_c.network.layer1,
        #         resnet_c.network.layer2,
        #         resnet_c.network.layer3)
        # # content network
        # self.network_c = torch.nn.Sequential(
        #         resnet_c.network.layer4,
        #         resnet_c.network.avgpool,
        #         networks.Flatten(),
        #         resnet_c.network.fc)
        # # style network
        # self.network_s = torch.nn.Sequential(
        #         resnet_s.network.layer4,
        #         resnet_s.network.avgpool,
        #         networks.Flatten(),
        #         resnet_s.network.fc)
        # Each sub-network gets its own Adam optimizer with shared hparams.
        def opt(p):
            return torch.optim.Adam(p, lr=hparams["lr"],
                    weight_decay=hparams["weight_decay"])
        self.optimizer_f = opt(self.network_f.parameters())
        self.optimizer_c = opt(self.network_c.parameters())
        self.optimizer_s = opt(self.network_s.parameters())
        self.weight_adv = hparams["sag_w_adv"]
    def forward_c(self, x):
        # learning content network on randomized style
        return self.network_c(self.randomize(self.network_f(x), "style"))
    def forward_s(self, x):
        # learning style network on randomized content
        return self.network_s(self.randomize(self.network_f(x), "content"))
    def randomize(self, x, what="style", eps=1e-5):
        """Instance-statistics randomization over the batch.

        Normalizes each example's features by their own mean/variance, then
        either re-applies mean/var mixed with those of a randomly permuted
        example ("style"), or keeps this example's stats but swaps in a
        permuted example's normalized features ("content").
        """
        device = "cuda" if x.is_cuda else "cpu"
        sizes = x.size()
        # Per-example mixing coefficient in [0, 1).
        alpha = torch.rand(sizes[0], 1).to(device)
        if len(sizes) == 4:
            # Conv feature maps: flatten spatial dims so stats are per-channel.
            x = x.view(sizes[0], sizes[1], -1)
            alpha = alpha.unsqueeze(-1)
        mean = x.mean(-1, keepdim=True)
        var = x.var(-1, keepdim=True)
        x = (x - mean) / (var + eps).sqrt()
        idx_swap = torch.randperm(sizes[0])
        if what == "style":
            # Interpolate this example's stats with a random partner's.
            mean = alpha * mean + (1 - alpha) * mean[idx_swap]
            var = alpha * var + (1 - alpha) * var[idx_swap]
        else:
            # Replace normalized content with a random partner's (no grad).
            x = x[idx_swap].detach()
        x = x * (var + eps).sqrt() + mean
        return x.view(*sizes)
    def update(self, minibatches, unlabeled=None):
        """One step = three sequential phases, each with its own optimizer."""
        all_x = torch.cat([x for x, y in minibatches])
        all_y = torch.cat([y for x, y in minibatches])
        # learn content: featurizer + content head on style-randomized features
        self.optimizer_f.zero_grad()
        self.optimizer_c.zero_grad()
        loss_c = F.cross_entropy(self.forward_c(all_x), all_y)
        loss_c.backward()
        self.optimizer_f.step()
        self.optimizer_c.step()
        # learn style: style head only, on content-randomized features
        self.optimizer_s.zero_grad()
        loss_s = F.cross_entropy(self.forward_s(all_x), all_y)
        loss_s.backward()
        self.optimizer_s.step()
        # learn adversary: featurizer only; -mean log-softmax equals (up to a
        # constant) cross-entropy against a uniform target, pushing the style
        # head's predictions toward uninformative.
        self.optimizer_f.zero_grad()
        loss_adv = -F.log_softmax(self.forward_s(all_x), dim=1).mean(1).mean()
        loss_adv = loss_adv * self.weight_adv
        loss_adv.backward()
        self.optimizer_f.step()
        return {'loss_c': loss_c.item(), 'loss_s': loss_s.item(),
                'loss_adv': loss_adv.item()}
    def predict(self, x):
        # Inference uses featurizer + content head only (no randomization).
        return self.network_c(self.network_f(x))
class RSC(ERM):
    """RSC: mute the highest-gradient feature activations during training.

    Per step: compute gradients of the true-class scores w.r.t. features,
    zero the top-percentile feature dimensions, and keep that muting only
    for the examples whose predictions change least; train on the result.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(RSC, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        # Percentile thresholds in [0, 100] derived from the drop factors.
        self.drop_f = (1 - hparams['rsc_f_drop_factor']) * 100
        self.drop_b = (1 - hparams['rsc_b_drop_factor']) * 100
        self.num_classes = num_classes
    def update(self, minibatches, unlabeled=None):
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"
        # inputs
        all_x = torch.cat([x for x, y in minibatches])
        # labels
        all_y = torch.cat([y for _, y in minibatches])
        # one-hot labels
        all_o = torch.nn.functional.one_hot(all_y, self.num_classes)
        # features
        all_f = self.featurizer(all_x)
        # predictions
        all_p = self.classifier(all_f)
        # Equation (1): compute gradients with respect to representation
        all_g = autograd.grad((all_p * all_o).sum(), all_f)[0]
        # Equation (2): compute top-gradient-percentile mask
        # (per-example percentile; mask keeps dims BELOW the threshold)
        percentiles = np.percentile(all_g.cpu(), self.drop_f, axis=1)
        percentiles = torch.Tensor(percentiles)
        percentiles = percentiles.unsqueeze(1).repeat(1, all_g.size(1))
        mask_f = all_g.lt(percentiles.to(device)).float()
        # Equation (3): mute top-gradient-percentile activations
        all_f_muted = all_f * mask_f
        # Equation (4): compute muted predictions
        all_p_muted = self.classifier(all_f_muted)
        # Section 3.3: Batch Percentage
        # Examples whose true-class softmax drops least keep the feature
        # muting; the rest (mask_b == 1 rows) are fully un-muted by the OR.
        all_s = F.softmax(all_p, dim=1)
        all_s_muted = F.softmax(all_p_muted, dim=1)
        changes = (all_s * all_o).sum(1) - (all_s_muted * all_o).sum(1)
        percentile = np.percentile(changes.detach().cpu(), self.drop_b)
        mask_b = changes.lt(percentile).float().view(-1, 1)
        mask = torch.logical_or(mask_f, mask_b).float()
        # Equations (3) and (4) again, this time muting over examples
        all_p_muted_again = self.classifier(all_f * mask)
        # Equation (5): update
        loss = F.cross_entropy(all_p_muted_again, all_y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return {'loss': loss.item()}
class SD(ERM):
    """
    Gradient Starvation: A Learning Proclivity in Neural Networks
    Equation 25 from [https://arxiv.org/pdf/2011.09468.pdf]

    ERM plus a spectral-decoupling penalty on the squared logit magnitude.
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(SD, self).__init__(input_shape, num_classes, num_domains,
                                 hparams)
        self.sd_reg = hparams["sd_reg"]

    def update(self, minibatches, unlabeled=None):
        # Pool every environment into one batch.
        inputs = torch.cat([x for x, _ in minibatches])
        labels = torch.cat([y for _, y in minibatches])

        logits = self.predict(inputs)
        ce_loss = F.cross_entropy(logits, labels)
        # Spectral decoupling: penalize the mean squared logit.
        logit_penalty = logits.pow(2).mean()

        self.optimizer.zero_grad()
        (ce_loss + self.sd_reg * logit_penalty).backward()
        self.optimizer.step()

        return {'loss': ce_loss.item(), 'penalty': logit_penalty.item()}
class ANDMask(ERM):
    """
    Learning Explanations that are Hard to Vary [https://arxiv.org/abs/2009.00329]
    AND-Mask implementation from [https://github.com/gibipara92/learning-explanations-hard-to-vary]

    Only keeps (averaged) gradient components whose per-environment signs
    agree for at least a tau fraction of environments.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(ANDMask, self).__init__(input_shape, num_classes, num_domains, hparams)
        self.tau = hparams["tau"]

    def update(self, minibatches, unlabeled=None):
        """One step: gather per-env gradients, AND-mask them, then step."""
        total_loss = 0
        # One gradient list per parameter, filled with per-env grads below.
        param_gradients = [[] for _ in self.network.parameters()]
        all_x = torch.cat([x for x, y in minibatches])
        all_logits = self.network(all_x)
        all_logits_idx = 0
        for i, (x, y) in enumerate(minibatches):
            # Slice this environment's logits back out of the joint batch.
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]
            env_loss = F.cross_entropy(logits, y)
            total_loss += env_loss
            env_grads = autograd.grad(env_loss, self.network.parameters(), retain_graph=True)
            for grads, env_grad in zip(param_gradients, env_grads):
                grads.append(env_grad)
        mean_loss = total_loss / len(minibatches)
        self.optimizer.zero_grad()
        self.mask_grads(self.tau, param_gradients, self.network.parameters())
        self.optimizer.step()
        return {'loss': mean_loss.item()}

    def mask_grads(self, tau, gradients, params):
        """Write AND-masked average gradients into each param.grad.

        FIX: use the ``tau`` argument instead of silently ignoring it in
        favor of self.tau (the only caller passes self.tau, so behavior is
        unchanged, but the signature now means what it says).
        """
        for param, grads in zip(params, gradients):
            grads = torch.stack(grads, dim=0)
            grad_signs = torch.sign(grads)
            # 1 where the mean sign across envs is at least tau in magnitude.
            mask = torch.mean(grad_signs, dim=0).abs() >= tau
            mask = mask.to(torch.float32)
            avg_grad = torch.mean(grads, dim=0)
            # Rescale by the surviving fraction so magnitudes are preserved.
            mask_t = (mask.sum() / mask.numel())
            param.grad = mask * avg_grad
            param.grad *= (1. / (1e-10 + mask_t))
        return 0
class IGA(ERM):
    """
    Inter-environmental Gradient Alignment
    From https://arxiv.org/abs/2008.01883v2

    Penalizes the squared deviation of each environment's gradient from
    the mean gradient across environments.
    """
    def __init__(self, in_features, num_classes, num_domains, hparams):
        # NOTE: the first parameter is historically named `in_features`
        # (its siblings call it `input_shape`); it is forwarded unchanged.
        super(IGA, self).__init__(in_features, num_classes, num_domains, hparams)

    def update(self, minibatches, unlabeled=None):
        """One step: mean CE loss + hparams['penalty'] * gradient variance.

        FIX: `unlabeled` defaults to None for consistency with every other
        algorithm's update() signature (the argument is unused either way).
        """
        all_x = torch.cat([x for x, y in minibatches])
        all_logits = self.network(all_x)
        total_loss = 0
        all_logits_idx = 0
        grads = []
        for i, (x, y) in enumerate(minibatches):
            # Slice this environment's logits out of the joint forward pass.
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]
            env_loss = F.cross_entropy(logits, y)
            total_loss += env_loss
            # create_graph=True so the gradient penalty itself is differentiable.
            env_grad = autograd.grad(env_loss, self.network.parameters(),
                                     create_graph=True)
            grads.append(env_grad)
        mean_loss = total_loss / len(minibatches)
        mean_grad = autograd.grad(mean_loss, self.network.parameters(),
                                  create_graph=True)
        # compute trace penalty: sum of squared deviations from the mean grad
        penalty_value = 0
        for grad in grads:
            for g, mean_g in zip(grad, mean_grad):
                penalty_value += (g - mean_g).pow(2).sum()
        objective = mean_loss + self.hparams['penalty'] * penalty_value
        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()
        return {'loss': mean_loss.item(), 'penalty': penalty_value.item()}
class SelfReg(ERM):
    """SelfReg: ERM plus in-class consistency (self-)regularization.

    Each step sorts the batch by class, shuffles examples *within* each
    class twice, and adds MSE losses tying every example's logits and
    projected features to those of its same-class partners (and to a
    mixup of the two shufflings).
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(SelfReg, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        self.num_classes = num_classes
        self.MSEloss = nn.MSELoss()
        input_feat_size = self.featurizer.n_outputs
        hidden_size = input_feat_size if input_feat_size==2048 else input_feat_size*2
        # Projection head (cdpl) used only for the feature-space terms.
        self.cdpl = nn.Sequential(
                            nn.Linear(input_feat_size, hidden_size),
                            nn.BatchNorm1d(hidden_size),
                            nn.ReLU(inplace=True),
                            nn.Linear(hidden_size, hidden_size),
                            nn.BatchNorm1d(hidden_size),
                            nn.ReLU(inplace=True),
                            nn.Linear(hidden_size, input_feat_size),
                            nn.BatchNorm1d(input_feat_size)
        )
    def update(self, minibatches, unlabeled=None):
        all_x = torch.cat([x for x, y in minibatches])
        all_y = torch.cat([y for _, y in minibatches])
        # Mixup coefficient for blending the two in-class shufflings.
        lam = np.random.beta(0.5, 0.5)
        batch_size = all_y.size()[0]
        # cluster and order features into same-class group
        with torch.no_grad():
            sorted_y, indices = torch.sort(all_y)
            sorted_x = torch.zeros_like(all_x)
            for idx, order in enumerate(indices):
                sorted_x[idx] = all_x[order]
            # `intervals` holds the end index of each class's run in the
            # sorted batch (assumes labels start at 0 — the first boundary
            # is only recorded when the label value changes).
            intervals = []
            ex = 0
            for idx, val in enumerate(sorted_y):
                if ex==val:
                    continue
                intervals.append(idx)
                ex = val
            intervals.append(batch_size)
            all_x = sorted_x
            all_y = sorted_y
        feat = self.featurizer(all_x)
        proj = self.cdpl(feat)
        output = self.classifier(feat)
        # shuffle: build two independent within-class permutations of the
        # logits and projected features.
        output_2 = torch.zeros_like(output)
        feat_2 = torch.zeros_like(proj)
        output_3 = torch.zeros_like(output)
        feat_3 = torch.zeros_like(proj)
        ex = 0
        for end in intervals:
            shuffle_indices = torch.randperm(end-ex)+ex
            shuffle_indices2 = torch.randperm(end-ex)+ex
            for idx in range(end-ex):
                output_2[idx+ex] = output[shuffle_indices[idx]]
                feat_2[idx+ex] = proj[shuffle_indices[idx]]
                output_3[idx+ex] = output[shuffle_indices2[idx]]
                feat_3[idx+ex] = proj[shuffle_indices2[idx]]
            ex = end
        # mixup of the two shufflings
        output_3 = lam*output_2 + (1-lam)*output_3
        feat_3 = lam*feat_2 + (1-lam)*feat_3
        # regularization: individual (…_2) and heterogeneous/mixup (…_3)
        # consistency losses, feature terms down-weighted by 0.3.
        L_ind_logit = self.MSEloss(output, output_2)
        L_hdl_logit = self.MSEloss(output, output_3)
        L_ind_feat = 0.3 * self.MSEloss(feat, feat_2)
        L_hdl_feat = 0.3 * self.MSEloss(feat, feat_3)
        cl_loss = F.cross_entropy(output, all_y)
        # Scale the regularizers down once classification loss drops below 1.
        C_scale = min(cl_loss.item(), 1.)
        loss = cl_loss + C_scale*(lam*(L_ind_logit + L_ind_feat)+(1-lam)*(L_hdl_logit + L_hdl_feat))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return {'loss': loss.item()}
class GradReverse(torch.autograd.Function):
    """Identity in the forward pass; negates the gradient on the way back."""

    @staticmethod
    def forward(ctx, x):
        # view_as returns the input unchanged while keeping autograd tracking.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output.neg()
class Domain_Discriminator(nn.Module):
    """Domain classifier placed behind a gradient-reversal layer."""

    def __init__(self, feature_dim, domain_classes):
        super(Domain_Discriminator, self).__init__()
        layers = [
            nn.Linear(feature_dim, feature_dim),
            nn.ReLU(),
            nn.Linear(feature_dim, feature_dim),
            nn.ReLU(),
            nn.Linear(feature_dim, domain_classes),
        ]
        self.class_classifier = nn.Sequential(*layers)

    def forward(self, di_z):
        # Reversing gradients makes the upstream featurizer adversarial to
        # this discriminator (domain-invariance pressure).
        reversed_z = GradReverse.apply(di_z)
        return self.class_classifier(reversed_z)
class Classifier(nn.Module):
    """Linear label predictor over concatenated (di_z, ds_z) features."""

    def __init__(self, feature_dim, classes):
        super(Classifier, self).__init__()
        # Input is the concatenation of two feature_dim-sized vectors.
        self.classifier = nn.Linear(int(feature_dim * 2), classes)

    def forward(self, di_z, ds_z):
        joint = torch.cat((di_z, ds_z), dim=1)
        return self.classifier(joint)
class ZS_Domain_Classifier(nn.Module):
    """Linear probe predicting the domain from domain-specific features."""

    def __init__(self, feature_dim, domain_classes):
        super(ZS_Domain_Classifier, self).__init__()
        head = nn.Linear(feature_dim, domain_classes)
        self.class_classifier = nn.Sequential(head)

    def forward(self, ds_z):
        logits = self.class_classifier(ds_z)
        return logits
class MDSDI(Algorithm):
    """Disentangles domain-invariant (di) and domain-specific (ds) features.

    Main phase: two featurizers and a joint label classifier, trained with
    a covariance penalty decorrelating di/ds features, an adversarial
    domain discriminator (gradient reversal) on di, and a plain domain
    classifier on ds. Second phase: a first-order meta step on the ds
    featurizer + classifier using random train/test minibatch pairs.

    Minibatches are (x, y, z) triples; z is used as the target of both
    domain heads (per-example domain label).
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(MDSDI, self).__init__(input_shape, num_classes, num_domains, hparams)
        self.di_featurizer = networks.Featurizer(input_shape, self.hparams)
        self.ds_featurizer = networks.Featurizer(input_shape, self.hparams)
        # Label head over the concatenated invariant + specific features.
        self.classifier = networks.Classifier(
            self.di_featurizer.n_outputs + self.ds_featurizer.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])
        self.domain_discriminator = Domain_Discriminator(self.di_featurizer.n_outputs, num_domains)
        self.domain_classifier = ZS_Domain_Classifier(self.ds_featurizer.n_outputs, num_domains)
        optimizer_params = list(self.di_featurizer.parameters()) + list(self.ds_featurizer.parameters()) + list(self.classifier.parameters()) + list(self.domain_discriminator.parameters()) + list(self.domain_classifier.parameters())
        self.optimizer = torch.optim.Adam(
            optimizer_params,
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        # Separate optimizer for the meta phase (ds featurizer + classifier).
        meta_optimizer_params = list(self.ds_featurizer.parameters()) + list(self.classifier.parameters())
        self.meta_optimizer = torch.optim.Adam(
            meta_optimizer_params,
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )

    def update(self, minibatches, unlabeled=None):
        all_x = torch.cat([x for x, y, z in minibatches])
        all_y = torch.cat([y for x, y, z in minibatches])
        all_z = torch.cat([z for x, y, z in minibatches])
        di_z, ds_z = self.di_featurizer(all_x), self.ds_featurizer(all_x)
        # Disentangle via the cross-covariance: push every per-example outer
        # product of centered di/ds features toward zero.
        mdi_z = torch.mean(di_z, 0)
        mds_z = torch.mean(ds_z, 0)
        di_z_n = (di_z - mdi_z[None, :])
        ds_z_n = (ds_z - mds_z[None, :])
        C = di_z_n[:, :, None] * ds_z_n[:, None, :]
        # FIX: zeros_like inherits C's device and dtype; the original
        # torch.zeros(...).cuda() hard-coded CUDA and broke CPU runs.
        target_cr = torch.zeros_like(C)
        disentangle_loss = nn.MSELoss()(C, target_cr)
        # di features should NOT predict the domain (gradient reversal).
        di_predicted_domain = self.domain_discriminator(di_z)
        predicted_domain_di_loss = F.cross_entropy(di_predicted_domain, all_z)
        # ds features SHOULD predict the domain.
        ds_predicted_classes = self.domain_classifier(ds_z)
        predicted_domain_ds_loss = F.cross_entropy(ds_predicted_classes, all_z)
        z = torch.cat((di_z, ds_z), dim=1)
        predicted_classes = self.classifier(z)
        acc_loss = F.cross_entropy(predicted_classes, all_y)
        total_loss = acc_loss + predicted_domain_di_loss + disentangle_loss + predicted_domain_ds_loss
        self.optimizer.zero_grad()
        total_loss.backward()
        self.optimizer.step()
        # --- First-order meta step on (ds_featurizer, classifier) ---
        num_mb = len(minibatches)
        meta_objective = 0
        self.meta_optimizer.zero_grad()
        self_param = list(self.ds_featurizer.parameters()) + list(self.classifier.parameters())
        for p in self_param:
            # zero_grad may leave .grad as None; materialize buffers so the
            # in-place .add_ accumulation below always has a target.
            if p.grad is None:
                p.grad = torch.zeros_like(p)
        for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
            # Clone the meta-learned modules for the inner adaptation step.
            inner_zs_model = copy.deepcopy(self.ds_featurizer)
            inner_classifier = copy.deepcopy(self.classifier)
            inner_param = list(inner_zs_model.parameters()) + list(inner_classifier.parameters())
            inner_opt = torch.optim.Adam(
                inner_param,
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay']
            )
            # Inner step on the "train" minibatch (di features are frozen
            # inputs here: the inner optimizer only sees the clones).
            di_z, ds_z = self.di_featurizer(xi), inner_zs_model(xi)
            z = torch.cat((di_z, ds_z), dim=1)
            predicted_classes = inner_classifier(z)
            inner_obj = F.cross_entropy(predicted_classes, yi)
            inner_opt.zero_grad()
            inner_obj.backward()
            inner_opt.step()
            # Accumulate the inner gradients onto the real parameters.
            for p_tgt, p_src in zip(self_param, inner_param):
                if p_src.grad is not None:
                    p_tgt.grad.data.add_(p_src.grad.data / num_mb)
            meta_objective += inner_obj.item()
            # "Test" minibatch: accumulate first-order gradients of the
            # post-adaptation loss as well.
            di_z, ds_z = self.di_featurizer(xj), inner_zs_model(xj)
            z = torch.cat((di_z, ds_z), dim=1)
            predicted_classes = inner_classifier(z)
            loss_inner_j = F.cross_entropy(predicted_classes, yj)
            grad_inner_j = autograd.grad(loss_inner_j, inner_param,
                                         allow_unused=True)
            meta_objective += (1.0 * loss_inner_j).item()
            for p, g_j in zip(self_param, grad_inner_j):
                if g_j is not None:
                    p.grad.data.add_(1.0 * g_j.data / num_mb)
        meta_objective /= len(minibatches)
        self.meta_optimizer.step()
        total_loss = total_loss.item() + meta_objective
        return {'loss': total_loss}

    def predict(self, x):
        di_z, ds_z = self.di_featurizer(x), self.ds_featurizer(x)
        z = torch.cat((di_z, ds_z), dim=1)
        return self.classifier(z)
aidgn | aidgn-main/domainbed/test/test_datasets.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Unit tests."""
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from parameterized import parameterized
from domainbed.test import helpers
class TestDatasets(unittest.TestCase):
    """Smoke tests run against every registered dataset."""
    @parameterized.expand(itertools.product(datasets.DATASETS))
    @unittest.skipIf('DATA_DIR' not in os.environ, 'needs DATA_DIR environment '
        'variable')
    def test_dataset_erm(self, dataset_name):
        """
        Test that ERM can complete one step on a given dataset without raising
        an error.
        Also test that num_environments() works correctly.
        """
        batch_size = 8
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        dataset = datasets.get_dataset_class(dataset_name)(
            os.environ['DATA_DIR'], [], hparams)
        # The registry's environment count must match the loaded dataset.
        self.assertEqual(datasets.num_environments(dataset_name),
            len(dataset))
        algorithm = algorithms.get_algorithm_class('ERM')(
            dataset.input_shape,
            dataset.num_classes,
            len(dataset),
            hparams).cuda()
        minibatches = helpers.make_minibatches(dataset, batch_size)
        # One training step; any exception fails the test.
        algorithm.update(minibatches)
| 1,454 | 28.1 | 80 | py |
aidgn | aidgn-main/domainbed/test/test_networks.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from parameterized import parameterized
class TestNetworks(unittest.TestCase):
    """Shape tests for the network builders on the debug datasets."""
    @parameterized.expand(itertools.product(helpers.DEBUG_DATASETS))
    def test_featurizer(self, dataset_name):
        """Test that Featurizer() returns a module which can take a
        correctly-sized input and return a correctly-sized output."""
        batch_size = 8
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        dataset = datasets.get_dataset_class(dataset_name)('', [], hparams)
        # One minibatch of inputs from the first (debug) environment.
        input_ = helpers.make_minibatches(dataset, batch_size)[0][0]
        input_shape = dataset.input_shape
        algorithm = networks.Featurizer(input_shape, hparams).cuda()
        output = algorithm(input_)
        # Featurizer output must be (batch, n_outputs).
        self.assertEqual(list(output.shape), [batch_size, algorithm.n_outputs])
| 1,181 | 30.105263 | 79 | py |
aidgn | aidgn-main/domainbed/test/test_models.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Unit tests."""
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from parameterized import parameterized
class TestAlgorithms(unittest.TestCase):
    """Smoke tests for every registered algorithm on the debug datasets."""
    @parameterized.expand(itertools.product(helpers.DEBUG_DATASETS, algorithms.ALGORITHMS))
    def test_init_update_predict(self, dataset_name, algorithm_name):
        """Test that a given algorithm inits, updates and predicts without raising
        errors."""
        batch_size = 8
        hparams = hparams_registry.default_hparams(algorithm_name, dataset_name)
        dataset = datasets.get_dataset_class(dataset_name)('', [], hparams)
        minibatches = helpers.make_minibatches(dataset, batch_size)
        algorithm_class = algorithms.get_algorithm_class(algorithm_name)
        algorithm = algorithm_class(dataset.input_shape, dataset.num_classes, len(dataset),
            hparams).cuda()
        # A few update steps must succeed and return a non-None result.
        for _ in range(3):
            self.assertIsNotNone(algorithm.update(minibatches))
        algorithm.eval()
        # predict() must return one row of class scores per example.
        self.assertEqual(list(algorithm.predict(minibatches[0][0]).shape),
            [batch_size, dataset.num_classes])
| 1,427 | 31.454545 | 91 | py |
aidgn | aidgn-main/domainbed/test/helpers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
DEBUG_DATASETS = ['Debug28', 'Debug224']


def make_minibatches(dataset, batch_size):
    """Test helper to make a minibatches array like train.py."""
    batches = []
    for env in dataset:
        xs = torch.stack([env[i][0] for i in range(batch_size)])
        ys = torch.stack([torch.as_tensor(env[i][1])
                          for i in range(batch_size)])
        batches.append((xs.cuda(), ys.cuda()))
    return batches
| 509 | 30.875 | 70 | py |
aidgn | aidgn-main/domainbed/test/test_model_selection.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Unit tests."""
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import model_selection
from domainbed.lib.query import Q
from parameterized import parameterized
def make_record(step, hparams_seed, envs):
    """Build a fake sweep-results record for the selection-method tests.

    envs is a list of (in_acc, out_acc, is_test_env) tuples, one per
    environment; test environments are collected into args['test_envs'].
    """
    record = {
        'args': {'test_envs': [], 'hparams_seed': hparams_seed},
        'step': step,
    }
    for env_idx, (in_acc, out_acc, is_test_env) in enumerate(envs):
        record[f'env{env_idx}_in_acc'] = in_acc
        record[f'env{env_idx}_out_acc'] = out_acc
        if is_test_env:
            record['args']['test_envs'].append(env_idx)
    return record
class TestSelectionMethod(unittest.TestCase):
    """Tests for SelectionMethod.sweep_acc() using a stub run_acc()."""
    class MySelectionMethod(model_selection.SelectionMethod):
        # Minimal stub: val/test accuracy come straight from env0 of the
        # first record. NOTE(review): the first classmethod arg is named
        # `self` rather than the conventional `cls`.
        @classmethod
        def run_acc(self, run_records):
            return {
                'val_acc': run_records[0]['env0_out_acc'],
                'test_acc': run_records[0]['env0_in_acc']
            }
    def test_sweep_acc(self):
        """sweep_acc returns the test acc of the run with the best val acc
        (here: val 0.8 beats 0.5, so the answer is that run's test acc, 0.7)."""
        sweep_records = Q([
            make_record(0, 0, [(0.7, 0.8, True)]),
            make_record(0, 1, [(0.9, 0.5, True)])
        ])
        self.assertEqual(
            self.MySelectionMethod.sweep_acc(sweep_records),
            0.7
        )
    def test_sweep_acc_empty(self):
        """sweep_acc returns None when there are no records at all."""
        self.assertEqual(
            self.MySelectionMethod.sweep_acc(Q([])),
            None
        )
class TestOracleSelectionMethod(unittest.TestCase):
    """Tests for oracle selection (validation on the test-domain splits)."""
    def test_run_acc_best_first(self):
        """Test run_acc() when the run has two records and the best one comes
        first"""
        run_records = Q([
            make_record(0, 0, [(0.75, 0.70, True)]),
            make_record(1, 0, [(0.65, 0.60, True)])
        ])
        # Expected: val = test-env out acc, test = test-env in acc.
        self.assertEqual(
            model_selection.OracleSelectionMethod.run_acc(run_records),
            {'val_acc': 0.60, 'test_acc': 0.65}
        )
    def test_run_acc_best_last(self):
        """Test run_acc() when the run has two records and the best one comes
        last"""
        run_records = Q([
            make_record(0, 0, [(0.75, 0.70, True)]),
            make_record(1, 0, [(0.85, 0.80, True)])
        ])
        self.assertEqual(
            model_selection.OracleSelectionMethod.run_acc(run_records),
            {'val_acc': 0.80, 'test_acc': 0.85}
        )
    def test_run_acc_empty(self):
        """Test run_acc() when there are no valid records to choose from."""
        self.assertEqual(
            model_selection.OracleSelectionMethod.run_acc(Q([])),
            None
        )
class TestIIDAccuracySelectionMethod(unittest.TestCase):
    """Tests for training-domain-validation (IID) model selection."""
    def test_run_acc(self):
        """val_acc is the mean out-split acc over the non-test envs of the
        chosen record ((0.7 + 0.8) / 2 = 0.75), test_acc its test-env in acc."""
        run_records = Q([
            make_record(0, 0,
                [(0.1, 0.2, True), (0.5, 0.6, False), (0.6, 0.7, False)]),
            make_record(1, 0,
                [(0.3, 0.4, True), (0.6, 0.7, False), (0.7, 0.8, False)]),
        ])
        self.assertEqual(
            model_selection.IIDAccuracySelectionMethod.run_acc(run_records),
            {'val_acc': 0.75, 'test_acc': 0.3}
        )
    def test_run_acc_empty(self):
        """run_acc returns None when there are no records."""
        self.assertEqual(
            model_selection.IIDAccuracySelectionMethod.run_acc(Q([])),
            None)
class TestLeaveOneOutSelectionMethod(unittest.TestCase):
    """Tests for leave-one-domain-out cross-validation selection."""
    def test_run_acc(self):
        """With one single-test-env record plus one held-out record per other
        env, val_acc averages the held-out accs ((0.5 + 0.6) / 2 = 0.55)."""
        run_records = Q([
            make_record(0, 0,
                [(0.1, 0., True), (0.0, 0., False), (0.0, 0., False)]),
            make_record(0, 0,
                [(0.0, 0., True), (0.5, 0., True), (0., 0., False)]),
            make_record(0, 0,
                [(0.0, 0., True), (0.0, 0., False), (0.6, 0., True)]),
        ])
        self.assertEqual(
            model_selection.LeaveOneOutSelectionMethod.run_acc(run_records),
            {'val_acc': 0.55, 'test_acc': 0.1}
        )
    def test_run_acc_empty(self):
        """Missing one of the held-out-env records makes the run unusable."""
        run_records = Q([
            make_record(0, 0,
                [(0.1, 0., True), (0.0, 0., False), (0.0, 0., False)]),
            make_record(0, 0,
                [(0.0, 0., True), (0.5, 0., True), (0., 0., False)]),
        ])
        self.assertEqual(
            model_selection.LeaveOneOutSelectionMethod.run_acc(run_records),
            None
        )
| 4,334 | 29.744681 | 77 | py |
aidgn | aidgn-main/domainbed/test/scripts/test_train.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# import argparse
# import itertools
import json
import os
import subprocess
# import sys
# import time
import unittest
import uuid
import torch
# import datasets
# import hparams_registry
# import algorithms
# import networks
# from parameterized import parameterized
# import test.helpers
class TestTrain(unittest.TestCase):
    """End-to-end smoke test for domainbed.scripts.train."""
    @unittest.skipIf('DATA_DIR' not in os.environ, 'needs DATA_DIR environment '
        'variable')
    def test_end_to_end(self):
        """Test that train.py successfully completes one step"""
        output_dir = os.path.join('/tmp', str(uuid.uuid4()))
        os.makedirs(output_dir, exist_ok=True)
        subprocess.run(f'python -m domainbed.scripts.train --dataset RotatedMNIST '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--steps=501', shell=True)
        with open(os.path.join(output_dir, 'results.jsonl')) as f:
            lines = [l[:-1] for l in f]
            last_epoch = json.loads(lines[-1])
        self.assertEqual(last_epoch['step'], 500)
        # Conservative values; anything lower and something's likely wrong.
        self.assertGreater(last_epoch['env0_in_acc'], 0.80)
        self.assertGreater(last_epoch['env1_in_acc'], 0.95)
        self.assertGreater(last_epoch['env2_in_acc'], 0.95)
        # FIX: this assertion was duplicated verbatim in the original;
        # the redundant second check has been removed.
        self.assertGreater(last_epoch['env3_in_acc'], 0.95)
        with open(os.path.join(output_dir, 'out.txt')) as f:
            text = f.read()
        # The final step number must appear in the captured stdout log.
        self.assertTrue('500' in text)
| 1,654 | 32.1 | 83 | py |
aidgn | aidgn-main/domainbed/test/scripts/test_sweep.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from domainbed.scripts import sweep
from parameterized import parameterized
class TestSweep(unittest.TestCase):
    """Unit and end-to-end tests for the sweep launcher (domainbed.scripts.sweep)."""
    def test_job(self):
        """Test that a newly-created job has valid
        output_dir, state, and command_str properties."""
        train_args = {'foo': 'bar'}
        sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
        job = sweep.Job(train_args, sweep_output_dir)
        self.assertTrue(job.output_dir.startswith(sweep_output_dir))
        self.assertEqual(job.state, sweep.Job.NOT_LAUNCHED)
        self.assertEqual(job.command_str,
            f'python -m domainbed.scripts.train --foo bar --output_dir {job.output_dir}')
    def test_job_launch(self):
        """Test that launching a job calls the launcher_fn with appropriate
        arguments, and sets the job to INCOMPLETE state."""
        train_args = {'foo': 'bar'}
        sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
        job = sweep.Job(train_args, sweep_output_dir)
        launcher_fn_called = False
        def launcher_fn(commands):
            nonlocal launcher_fn_called
            launcher_fn_called = True
            self.assertEqual(len(commands), 1)
            self.assertEqual(commands[0], job.command_str)
        sweep.Job.launch([job], launcher_fn)
        self.assertTrue(launcher_fn_called)
        # Re-reading the job from disk must now show it as launched.
        job = sweep.Job(train_args, sweep_output_dir)
        self.assertEqual(job.state, sweep.Job.INCOMPLETE)
    def test_job_delete(self):
        """Test that deleting a launched job returns it to the NOT_LAUNCHED
        state"""
        train_args = {'foo': 'bar'}
        sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
        job = sweep.Job(train_args, sweep_output_dir)
        sweep.Job.launch([job], (lambda commands: None))
        sweep.Job.delete([job])
        job = sweep.Job(train_args, sweep_output_dir)
        self.assertEqual(job.state, sweep.Job.NOT_LAUNCHED)
    def test_make_args_list(self):
        """Test that, for a typical input, make_job_list returns a list
        of the correct length"""
        args_list = sweep.make_args_list(
            n_trials=2,
            dataset_names=['Debug28'],
            algorithms=['ERM'],
            n_hparams_from=0,
            n_hparams=3,
            steps=123,
            data_dir='/tmp/data',
            task='domain_generalization',
            holdout_fraction=0.2,
            single_test_envs=False,
            hparams=None
        )
        # 2 trials x 3 hparam seeds x 6 test-env configurations for Debug28.
        assert(len(args_list) == 2*3*(3+3))
    @unittest.skipIf('DATA_DIR' not in os.environ, 'needs DATA_DIR environment '
        'variable')
    def test_end_to_end(self):
        """Launch a tiny sweep with the dummy launcher, verify relaunch is a
        no-op, then delete incomplete jobs and verify they relaunch."""
        output_dir = os.path.join('/tmp', str(uuid.uuid4()))
        result = subprocess.run(f'python -m domainbed.scripts.sweep launch '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        stdout_lines = result.stdout.decode('utf8').split("\n")
        dummy_launcher_lines = [l for l in stdout_lines
            if l.startswith('Dummy launcher:')]
        self.assertEqual(len(dummy_launcher_lines), 6)
        # Now run it again and make sure it doesn't try to relaunch those jobs
        result = subprocess.run(f'python -m domainbed.scripts.sweep launch '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        stdout_lines = result.stdout.decode('utf8').split("\n")
        dummy_launcher_lines = [l for l in stdout_lines
            if l.startswith('Dummy launcher:')]
        self.assertEqual(len(dummy_launcher_lines), 0)
        # Delete the incomplete jobs, try launching again, and make sure they
        # get relaunched.
        subprocess.run(f'python -m domainbed.scripts.sweep delete_incomplete '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        result = subprocess.run(f'python -m domainbed.scripts.sweep launch '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        stdout_lines = result.stdout.decode('utf8').split("\n")
        dummy_launcher_lines = [l for l in stdout_lines
            if l.startswith('Dummy launcher:')]
        self.assertEqual(len(dummy_launcher_lines), 6)
| 5,273 | 39.569231 | 89 | py |
aidgn | aidgn-main/domainbed/test/scripts/test_collect_results.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from domainbed.scripts import collect_results
from parameterized import parameterized
import io
import textwrap
class TestCollectResults(unittest.TestCase):
    """Tests for domainbed.scripts.collect_results formatting helpers and an
    end-to-end check against a stored ground-truth results file."""
    def test_format_mean(self):
        # format_mean's third return value is the formatted string; check it
        # in both plain-text and LaTeX flavors.
        self.assertEqual(
            collect_results.format_mean([0.1, 0.2, 0.3], False)[2],
            '20.0 +/- 4.7')
        self.assertEqual(
            collect_results.format_mean([0.1, 0.2, 0.3], True)[2],
            '20.0 $\pm$ 4.7')
    def test_print_table_non_latex(self):
        # Capture stdout while printing a small 2x2 table.
        temp_out = io.StringIO()
        sys.stdout = temp_out
        table = [['1', '2'], ['3', '4']]
        collect_results.print_table(table, 'Header text', ['R1', 'R2'],
            ['C1', 'C2'], colwidth=10, latex=False)
        sys.stdout = sys.__stdout__
        # NOTE: the expected string contains significant trailing spaces
        # (columns are padded to colwidth) -- do not strip them.
        self.assertEqual(
            temp_out.getvalue(),
            textwrap.dedent("""
            -------- Header text
            C1          C2        
            R1          1           2         
            R2          3           4         
            """)
        )
    def test_print_table_latex(self):
        # Same table as above, rendered as a LaTeX tabular environment.
        temp_out = io.StringIO()
        sys.stdout = temp_out
        table = [['1', '2'], ['3', '4']]
        collect_results.print_table(table, 'Header text', ['R1', 'R2'],
            ['C1', 'C2'], colwidth=10, latex=True)
        sys.stdout = sys.__stdout__
        self.assertEqual(
            temp_out.getvalue(),
            textwrap.dedent(r"""
            \begin{center}
            \adjustbox{max width=\textwidth}{%
            \begin{tabular}{lcc}
            \toprule
            \textbf{C1 & \textbf{C2 \\
            \midrule
            R1 & 1 & 2 \\
            R2 & 3 & 4 \\
            \bottomrule
            \end{tabular}}
            \end{center}
            """)
        )
    def test_get_grouped_records(self):
        pass # TODO
    def test_print_results_tables(self):
        pass # TODO
    def test_load_records(self):
        pass # TODO
    def test_end_to_end(self):
        """
        Test that collect_results.py's output matches a manually-verified
        ground-truth when run on a given directory of test sweep data.
        If you make any changes to the output of collect_results.py, you'll need
        to update the ground-truth and manually verify that it's still
        correct. The command used to update the ground-truth is:
        python -m domainbed.scripts.collect_results --input_dir=domainbed/misc/test_sweep_data \
            | tee domainbed/misc/test_sweep_results.txt
        Furthermore, if you make any changes to the data format, you'll also
        need to rerun the test sweep. The command used to run the test sweep is:
        python -m domainbed.scripts.sweep launch --data_dir=$DATA_DIR \
            --output_dir=domainbed/misc/test_sweep_data --algorithms ERM \
            --datasets VLCS --steps 1001 --n_hparams 2 --n_trials 2 \
            --command_launcher local
        """
        result = subprocess.run('python -m domainbed.scripts.collect_results'
            ' --input_dir=domainbed/misc/test_sweep_data', shell=True,
            stdout=subprocess.PIPE)
        with open('domainbed/misc/test_sweep_results.txt', 'r') as f:
            ground_truth = f.read()
        self.assertEqual(result.stdout.decode('utf8'), ground_truth)
| 3,718 | 31.622807 | 96 | py |
aidgn | aidgn-main/domainbed/scripts/save_images.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Save some representative images from each dataset to disk.
"""
import random
import torch
import argparse
from domainbed import hparams_registry
from domainbed import datasets
import imageio
import os
from tqdm import tqdm
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Domain generalization')
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--output_dir', type=str)
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    datasets_to_save = ['OfficeHome', 'TerraIncognita', 'DomainNet', 'RotatedMNIST', 'ColoredMNIST', 'SVIRO']
    for dataset_name in tqdm(datasets_to_save):
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        dataset = datasets.get_dataset_class(dataset_name)(
            args.data_dir,
            list(range(datasets.num_environments(dataset_name))),
            hparams)
        # For every environment, sample 50 images and write them to disk.
        for env_idx, env in enumerate(tqdm(dataset)):
            for i in tqdm(range(50)):
                idx = random.choice(list(range(len(env))))
                x, y = env[idx]
                # Resample until the class label is <= 10; presumably this
                # keeps only low-numbered classes -- TODO confirm intent.
                while y > 10:
                    idx = random.choice(list(range(len(env))))
                    x, y = env[idx]
                # 2-channel images get a zero third channel so they can be
                # saved as RGB.
                if x.shape[0] == 2:
                    x = torch.cat([x, torch.zeros_like(x)], dim=0)[:3,:,:]
                # Negative pixel values mean the tensor is normalized;
                # undo the normalization (constants look like the standard
                # ImageNet mean/std -- confirm against the dataset transforms).
                if x.min() < 0:
                    mean = torch.tensor([0.485, 0.456, 0.406])[:,None,None]
                    std = torch.tensor([0.229, 0.224, 0.225])[:,None,None]
                    x = (x * std) + mean
                assert(x.min() >= 0)
                assert(x.max() <= 1)
                # Scale [0, 1] floats to [0, 255] uint8, channels-last.
                x = (x * 255.99)
                x = x.numpy().astype('uint8').transpose(1,2,0)
                imageio.imwrite(
                    os.path.join(args.output_dir,
                        f'{dataset_name}_env{env_idx}{dataset.ENVIRONMENTS[env_idx]}_{i}_idx{idx}_class{y}.png'),
                    x)
| 2,029 | 38.803922 | 113 | py |
aidgn | aidgn-main/domainbed/scripts/sweep.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Run sweeps
"""
import argparse
import copy
import getpass
import hashlib
import json
import os
import random
import shutil
import time
import uuid
import numpy as np
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed.lib import misc
from domainbed import command_launchers
import tqdm
import shlex
class Job:
    """One training run in a sweep.

    A Job's identity is the MD5 hash of its (sorted, JSON-serialized)
    training arguments, which also names its output directory. Its state
    (NOT_LAUNCHED / INCOMPLETE / DONE) is derived purely from what exists
    on disk under that directory.
    """
    NOT_LAUNCHED = 'Not launched'
    INCOMPLETE = 'Incomplete'
    DONE = 'Done'

    def __init__(self, train_args, sweep_output_dir):
        canonical = json.dumps(train_args, sort_keys=True)
        digest = hashlib.md5(canonical.encode('utf-8')).hexdigest()
        self.output_dir = os.path.join(sweep_output_dir, digest)

        self.train_args = copy.deepcopy(train_args)
        self.train_args['output_dir'] = self.output_dir

        def render(value):
            # Lists become space-separated tokens; strings get shell-quoted.
            if isinstance(value, list):
                return ' '.join(str(item) for item in value)
            if isinstance(value, str):
                return shlex.quote(value)
            return value

        parts = ['python', '-m', 'domainbed.scripts.train']
        parts.extend(f'--{key} {render(value)}'
                     for key, value in sorted(self.train_args.items()))
        self.command_str = ' '.join(parts)

        # State is read back from the filesystem, never cached elsewhere:
        # a 'done' marker file means finished; a bare dir means launched.
        if os.path.exists(os.path.join(self.output_dir, 'done')):
            self.state = Job.DONE
        elif os.path.exists(self.output_dir):
            self.state = Job.INCOMPLETE
        else:
            self.state = Job.NOT_LAUNCHED

    def __str__(self):
        identity = (self.train_args['dataset'],
                    self.train_args['algorithm'],
                    self.train_args['test_envs'],
                    self.train_args['hparams_seed'])
        return '{}: {} {}'.format(self.state, self.output_dir, identity)

    @staticmethod
    def launch(jobs, launcher_fn):
        """Create each job's output dir (in shuffled order) and hand all
        command strings to launcher_fn in a single call."""
        print('Launching...')
        pending = list(jobs)
        np.random.shuffle(pending)
        print('Making job directories:')
        for job in tqdm.tqdm(pending, leave=False):
            os.makedirs(job.output_dir, exist_ok=True)
        launcher_fn([job.command_str for job in pending])
        print(f'Launched {len(pending)} jobs!')

    @staticmethod
    def delete(jobs):
        """Remove each job's output directory, resetting it to NOT_LAUNCHED."""
        print('Deleting...')
        for job in jobs:
            shutil.rmtree(job.output_dir)
        print(f'Deleted {len(jobs)} jobs!')
def all_test_env_combinations(n):
    """
    For a dataset with n >= 3 envs, return all combinations of 1 and 2 test
    envs.
    """
    assert n >= 3
    for first in range(n):
        # The singleton comes first, followed by every pair it leads.
        yield [first]
        yield from ([first, second] for second in range(first + 1, n))
def make_args_list(n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps,
    data_dir, task, holdout_fraction, single_test_envs, hparams):
    """Enumerate the full grid of train-script argument dicts for a sweep:
    every (trial, dataset, algorithm, test-env choice, hparams seed)."""
    args_list = []
    for trial_seed in range(n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                env_count = datasets.num_environments(dataset)
                if single_test_envs:
                    all_test_envs = [[env] for env in range(env_count)]
                else:
                    all_test_envs = all_test_env_combinations(env_count)
                for test_envs in all_test_envs:
                    for hparams_seed in range(n_hparams_from, n_hparams):
                        train_args = {
                            'dataset': dataset,
                            'algorithm': algorithm,
                            'test_envs': test_envs,
                            'holdout_fraction': holdout_fraction,
                            'hparams_seed': hparams_seed,
                            'data_dir': data_dir,
                            'task': task,
                            'trial_seed': trial_seed,
                            # Fixed seed for everything outside the
                            # hparams/trial seeding.
                            'seed': 0,
                        }
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        args_list.append(train_args)
    return args_list
def ask_for_confirmation():
    """Prompt on stdin and exit(0) unless the reply starts with 'y'/'Y'."""
    reply = input('Are you sure? (y/n) ')
    if reply.lower().strip()[:1] != "y":
        print('Nevermind!')
        exit(0)
# Every registered dataset except the synthetic Debug* ones.
DATASETS = [d for d in datasets.DATASETS if "Debug" not in d]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run a sweep')
parser.add_argument('command', choices=['launch', 'delete_incomplete'])
parser.add_argument('--datasets', nargs='+', type=str, default=DATASETS)
parser.add_argument('--algorithms', nargs='+', type=str, default=algorithms.ALGORITHMS)
parser.add_argument('--task', type=str, default="domain_generalization")
parser.add_argument('--n_hparams_from', type=int, default=0)
parser.add_argument('--n_hparams', type=int, default=20)
parser.add_argument('--output_dir', type=str, required=True)
parser.add_argument('--data_dir', type=str, required=True)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n_trials', type=int, default=3)
parser.add_argument('--command_launcher', type=str, required=True)
parser.add_argument('--steps', type=int, default=None)
parser.add_argument('--hparams', type=str, default=None)
parser.add_argument('--holdout_fraction', type=float, default=0.2)
parser.add_argument('--single_test_envs', type=bool, default=True)#action='store_true')
parser.add_argument('--skip_confirmation', type=bool, default=True)#action='store_true')
args = parser.parse_args()
args_list = make_args_list(
n_trials=args.n_trials,
dataset_names=args.datasets,
algorithms=args.algorithms,
n_hparams_from=args.n_hparams_from,
n_hparams=args.n_hparams,
steps=args.steps,
data_dir=args.data_dir,
task=args.task,
holdout_fraction=args.holdout_fraction,
single_test_envs=args.single_test_envs,
hparams=args.hparams
)
jobs = [Job(train_args, args.output_dir) for train_args in args_list]
for job in jobs:
print(job)
print("{} jobs: {} done, {} incomplete, {} not launched.".format(
len(jobs),
len([j for j in jobs if j.state == Job.DONE]),
len([j for j in jobs if j.state == Job.INCOMPLETE]),
len([j for j in jobs if j.state == Job.NOT_LAUNCHED]))
)
if args.command == 'launch':
to_launch = [j for j in jobs if j.state == Job.NOT_LAUNCHED]
print(f'About to launch {len(to_launch)} jobs.')
if not args.skip_confirmation:
ask_for_confirmation()
launcher_fn = command_launchers.REGISTRY[args.command_launcher]
Job.launch(to_launch, launcher_fn)
elif args.command == 'delete_incomplete':
to_delete = [j for j in jobs if j.state == Job.INCOMPLETE]
print(f'About to delete {len(to_delete)} jobs.')
if not args.skip_confirmation:
ask_for_confirmation()
Job.delete(to_delete)
| 7,332 | 36.035354 | 92 | py |
aidgn | aidgn-main/domainbed/scripts/download.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torchvision.datasets import MNIST
import xml.etree.ElementTree as ET
from zipfile import ZipFile
import argparse
import tarfile
import shutil
import gdown
import uuid
import json
import os
from wilds.datasets.camelyon17_dataset import Camelyon17Dataset
from wilds.datasets.fmow_dataset import FMoWDataset
# utils #######################################################################
def stage_path(data_dir, name):
    """Return data_dir/name, creating that directory if it doesn't exist."""
    target = os.path.join(data_dir, name)
    if not os.path.exists(target):
        os.makedirs(target)
    return target
def download_and_extract(url, dst, remove=True):
    """Download `url` to `dst` with gdown, extract it into dst's directory
    if it is a known archive (.tar.gz, .tar, .zip), and optionally delete
    the archive afterwards.

    Uses context managers so archive handles are closed even if extraction
    raises (the original leaked open handles on error). The suffix checks
    are mutually exclusive, so if/elif dispatch is equivalent.
    """
    gdown.download(url, dst, quiet=False)

    target_dir = os.path.dirname(dst)
    if dst.endswith(".tar.gz"):
        with tarfile.open(dst, "r:gz") as tar:
            tar.extractall(target_dir)
    elif dst.endswith(".tar"):
        with tarfile.open(dst, "r:") as tar:
            tar.extractall(target_dir)
    elif dst.endswith(".zip"):
        with ZipFile(dst, "r") as zf:
            zf.extractall(target_dir)

    if remove:
        os.remove(dst)
# VLCS ########################################################################
# Slower, but builds dataset from the original sources
#
# def download_vlcs(data_dir):
# full_path = stage_path(data_dir, "VLCS")
#
# tmp_path = os.path.join(full_path, "tmp/")
# if not os.path.exists(tmp_path):
# os.makedirs(tmp_path)
#
# with open("domainbed/misc/vlcs_files.txt", "r") as f:
# lines = f.readlines()
# files = [line.strip().split() for line in lines]
#
# download_and_extract("http://pjreddie.com/media/files/VOCtrainval_06-Nov-2007.tar",
# os.path.join(tmp_path, "voc2007_trainval.tar"))
#
# download_and_extract("https://drive.google.com/uc?id=1I8ydxaAQunz9R_qFFdBFtw6rFTUW9goz",
# os.path.join(tmp_path, "caltech101.tar.gz"))
#
# download_and_extract("http://groups.csail.mit.edu/vision/Hcontext/data/sun09_hcontext.tar",
# os.path.join(tmp_path, "sun09_hcontext.tar"))
#
# tar = tarfile.open(os.path.join(tmp_path, "sun09.tar"), "r:")
# tar.extractall(tmp_path)
# tar.close()
#
# for src, dst in files:
# class_folder = os.path.join(data_dir, dst)
#
# if not os.path.exists(class_folder):
# os.makedirs(class_folder)
#
# dst = os.path.join(class_folder, uuid.uuid4().hex + ".jpg")
#
# if "labelme" in src:
# # download labelme from the web
# gdown.download(src, dst, quiet=False)
# else:
# src = os.path.join(tmp_path, src)
# shutil.copyfile(src, dst)
#
# shutil.rmtree(tmp_path)
def download_vlcs(data_dir):
    """Download and unpack the VLCS dataset into <data_dir>/VLCS."""
    # Original URL: http://www.eecs.qmul.ac.uk/~dl307/project_iccv2017
    full_path = stage_path(data_dir, "VLCS")
    archive_path = os.path.join(data_dir, "VLCS.tar.gz")
    download_and_extract("https://drive.google.com/uc?id=1skwblH1_okBwxWxmRsp9_qi15hyPpxg8",
                         archive_path)
# MNIST #######################################################################
def download_mnist(data_dir):
    """Fetch MNIST via torchvision into <data_dir>/MNIST."""
    # Original URL: http://yann.lecun.com/exdb/mnist/
    mnist_root = stage_path(data_dir, "MNIST")
    MNIST(mnist_root, download=True)
# PACS ########################################################################
def download_pacs(data_dir):
    """Download PACS, then move the extracted 'kfold' tree into place."""
    # Original URL: http://www.eecs.qmul.ac.uk/~dl307/project_iccv2017
    full_path = stage_path(data_dir, "PACS")
    archive_path = os.path.join(data_dir, "PACS.zip")
    download_and_extract("https://drive.google.com/uc?id=0B6x7gtvErXgfbF9CSk53UkRxVzg",
                         archive_path)
    os.rename(os.path.join(data_dir, "kfold"), full_path)
# Office-Home #################################################################
def download_office_home(data_dir):
    """Download Office-Home and rename the extracted folder into place."""
    # Original URL: http://hemanthdv.org/OfficeHome-Dataset/
    full_path = stage_path(data_dir, "office_home")
    archive_path = os.path.join(data_dir, "office_home.zip")
    download_and_extract("https://drive.google.com/uc?id=0B81rNlvomiwed0V1YUxQdC1uOTg",
                         archive_path)
    os.rename(os.path.join(data_dir, "OfficeHomeDataset_10072016"), full_path)
# DomainNET ###################################################################
def download_domain_net(data_dir):
    """Download the six DomainNet domain archives and prune known duplicates."""
    # Original URL: http://ai.bu.edu/M3SDA/
    full_path = stage_path(data_dir, "domain_net")
    urls = [
        "http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/clipart.zip",
        "http://csr.bu.edu/ftp/visda/2019/multi-source/infograph.zip",
        "http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/painting.zip",
        "http://csr.bu.edu/ftp/visda/2019/multi-source/quickdraw.zip",
        "http://csr.bu.edu/ftp/visda/2019/multi-source/real.zip",
        "http://csr.bu.edu/ftp/visda/2019/multi-source/sketch.zip"
    ]
    for archive_url in urls:
        archive_name = archive_url.split("/")[-1]
        download_and_extract(archive_url, os.path.join(full_path, archive_name))
    # Remove files listed in the duplicates manifest (best-effort: missing
    # files are ignored).
    with open("domainbed/misc/domain_net_duplicates.txt", "r") as f:
        for line in f:
            try:
                os.remove(os.path.join(full_path, line.strip()))
            except OSError:
                pass
# TerraIncognita ##############################################################
def download_terra_incognita(data_dir):
    """Download TerraIncognita (Caltech Camera Traps) and reorganize the
    images into location_<id>/<category>/ folders for the four kept
    locations and ten kept categories."""
    # Original URL: https://beerys.github.io/CaltechCameraTraps/
    full_path = stage_path(data_dir, "terra_incognita")
    download_and_extract(
        "http://www.vision.caltech.edu/~sbeery/datasets/caltechcameratraps18/eccv_18_all_images_sm.tar.gz",
        os.path.join(full_path, "terra_incognita_images.tar.gz"))
    download_and_extract(
        "http://www.vision.caltech.edu/~sbeery/datasets/caltechcameratraps18/eccv_18_all_annotations.tar.gz",
        os.path.join(full_path, "terra_incognita_annotations.tar.gz"))
    # Only these camera locations / categories are kept.
    include_locations = [38, 46, 100, 43]
    include_categories = [
        "bird", "bobcat", "cat", "coyote", "dog", "empty", "opossum", "rabbit",
        "raccoon", "squirrel"
    ]
    images_folder = os.path.join(full_path, "eccv_18_all_images_sm/")
    annotations_file = os.path.join(full_path, "CaltechCameraTrapsECCV18.json")
    destination_folder = full_path
    # NOTE(review): `stats` is built but never used, and its counting starts
    # at 0 on the first occurrence (off by one) -- dead code, left as-is.
    stats = {}
    if not os.path.exists(destination_folder):
        os.mkdir(destination_folder)
    with open(annotations_file, "r") as f:
        data = json.load(f)
    category_dict = {}
    for item in data['categories']:
        category_dict[item['id']] = item['name']
    # For each kept image, copy it under location_<loc>/<category>/.
    # NOTE(review): this rescans all annotations for every image
    # (O(images x annotations)).
    for image in data['images']:
        image_location = image['location']
        if image_location not in include_locations:
            continue
        loc_folder = os.path.join(destination_folder,
                                  'location_' + str(image_location) + '/')
        if not os.path.exists(loc_folder):
            os.mkdir(loc_folder)
        image_id = image['id']
        image_fname = image['file_name']
        for annotation in data['annotations']:
            if annotation['image_id'] == image_id:
                if image_location not in stats:
                    stats[image_location] = {}
                category = category_dict[annotation['category_id']]
                if category not in include_categories:
                    continue
                if category not in stats[image_location]:
                    stats[image_location][category] = 0
                else:
                    stats[image_location][category] += 1
                loc_cat_folder = os.path.join(loc_folder, category + '/')
                if not os.path.exists(loc_cat_folder):
                    os.mkdir(loc_cat_folder)
                dst_path = os.path.join(loc_cat_folder, image_fname)
                src_path = os.path.join(images_folder, image_fname)
                shutil.copyfile(src_path, dst_path)
    # Clean up the raw extraction once images are reorganized.
    shutil.rmtree(images_folder)
    os.remove(annotations_file)
def download_terra_incognita_domainbed(data_dir):
    """Variant of download_terra_incognita using the lila.science mirrors;
    locations are string-typed here because this JSON stores them as strings."""
    # Original URL: https://beerys.github.io/CaltechCameraTraps/
    # New URL: http://lila.science/datasets/caltech-camera-traps
    full_path = stage_path(data_dir, "terra_incognita")
    download_and_extract(
        "https://lilablobssc.blob.core.windows.net/caltechcameratraps/eccv_18_all_images_sm.tar.gz",
        os.path.join(full_path, "terra_incognita_images.tar.gz"))
    download_and_extract(
        "https://lilablobssc.blob.core.windows.net/caltechcameratraps/labels/caltech_camera_traps.json.zip",
        os.path.join(full_path, "caltech_camera_traps.json.zip"))
    include_locations = ["38", "46", "100", "43"]
    include_categories = [
        "bird", "bobcat", "cat", "coyote", "dog", "empty", "opossum", "rabbit",
        "raccoon", "squirrel"
    ]
    images_folder = os.path.join(full_path, "eccv_18_all_images_sm/")
    # NOTE(review): presumably this is the file extracted from
    # caltech_camera_traps.json.zip above -- verify the name inside the zip.
    annotations_file = os.path.join(full_path, "caltech_images_20210113.json")
    destination_folder = full_path
    # NOTE(review): `stats` is built but never used, and its counting starts
    # at 0 on the first occurrence (off by one) -- dead code, left as-is.
    stats = {}
    if not os.path.exists(destination_folder):
        os.mkdir(destination_folder)
    with open(annotations_file, "r") as f:
        data = json.load(f)
    category_dict = {}
    for item in data['categories']:
        category_dict[item['id']] = item['name']
    # For each kept image, copy it under location_<loc>/<category>/.
    # NOTE(review): this rescans all annotations for every image
    # (O(images x annotations)).
    for image in data['images']:
        image_location = image['location']
        if image_location not in include_locations:
            continue
        loc_folder = os.path.join(destination_folder,
                                  'location_' + str(image_location) + '/')
        if not os.path.exists(loc_folder):
            os.mkdir(loc_folder)
        image_id = image['id']
        image_fname = image['file_name']
        for annotation in data['annotations']:
            if annotation['image_id'] == image_id:
                if image_location not in stats:
                    stats[image_location] = {}
                category = category_dict[annotation['category_id']]
                if category not in include_categories:
                    continue
                if category not in stats[image_location]:
                    stats[image_location][category] = 0
                else:
                    stats[image_location][category] += 1
                loc_cat_folder = os.path.join(loc_folder, category + '/')
                if not os.path.exists(loc_cat_folder):
                    os.mkdir(loc_cat_folder)
                dst_path = os.path.join(loc_cat_folder, image_fname)
                src_path = os.path.join(images_folder, image_fname)
                shutil.copyfile(src_path, dst_path)
    # Clean up the raw extraction once images are reorganized.
    shutil.rmtree(images_folder)
    os.remove(annotations_file)
def process_terra_incognita(data_dir):
    """Extract previously-downloaded TerraIncognita archives and reorganize
    the images into location_<id>/<category>/ folders.

    Expects 'terra_incognita_images.tar.gz' and 'caltech_camera_traps.json.zip'
    to already exist under <data_dir>/Terra_incognita (as fetched by
    download_terra_incognita_domainbed). The image archive and annotations
    JSON are deleted once the copies are made; the zip is kept, as before.

    Changes vs. the original: archives are opened via context managers (no
    leaked handles on error), annotations are indexed by image id once
    instead of rescanning the whole list per image (was O(images x
    annotations)), and the unused `stats` counter now counts first
    occurrences (the original started each count at 0).
    """
    # Original URL: https://beerys.github.io/CaltechCameraTraps/
    # New URL: http://lila.science/datasets/caltech-camera-traps
    full_path = stage_path(data_dir, "Terra_incognita")

    # Unpack the image tarball in place, then delete it.
    dst = os.path.join(full_path, "terra_incognita_images.tar.gz")
    with tarfile.open(dst, "r:gz") as tar:
        tar.extractall(os.path.dirname(dst))
    os.remove(dst)

    # Unpack the annotations zip (the zip itself is intentionally kept).
    dst = os.path.join(full_path, "caltech_camera_traps.json.zip")
    with ZipFile(dst, "r") as zf:
        zf.extractall(os.path.dirname(dst))

    # Only these camera locations / categories are kept.
    include_locations = ["38", "46", "100", "43"]
    include_categories = [
        "bird", "bobcat", "cat", "coyote", "dog", "empty", "opossum", "rabbit",
        "raccoon", "squirrel"
    ]

    images_folder = os.path.join(full_path, "eccv_18_all_images_sm/")
    # NOTE(review): presumably this is the file extracted from the zip above
    # -- verify the name inside caltech_camera_traps.json.zip.
    annotations_file = os.path.join(full_path, "caltech_images_20210113.json")
    destination_folder = full_path
    os.makedirs(destination_folder, exist_ok=True)

    with open(annotations_file, "r") as f:
        data = json.load(f)

    category_dict = {item['id']: item['name'] for item in data['categories']}

    # Index annotations by image id once, instead of rescanning the full
    # annotation list for every image.
    annotations_by_image = {}
    for annotation in data['annotations']:
        annotations_by_image.setdefault(
            annotation['image_id'], []).append(annotation)

    stats = {}
    for image in data['images']:
        image_location = image['location']
        if image_location not in include_locations:
            continue

        loc_folder = os.path.join(destination_folder,
                                  'location_' + str(image_location) + '/')
        os.makedirs(loc_folder, exist_ok=True)

        image_fname = image['file_name']
        for annotation in annotations_by_image.get(image['id'], []):
            stats.setdefault(image_location, {})
            category = category_dict[annotation['category_id']]
            if category not in include_categories:
                continue

            # Count every occurrence (the original started at 0, so the
            # first occurrence per (location, category) went uncounted).
            stats[image_location][category] = \
                stats[image_location].get(category, 0) + 1

            loc_cat_folder = os.path.join(loc_folder, category + '/')
            os.makedirs(loc_cat_folder, exist_ok=True)
            dst_path = os.path.join(loc_cat_folder, image_fname)
            src_path = os.path.join(images_folder, image_fname)
            shutil.copyfile(src_path, dst_path)

    # Clean up the raw extraction once images are reorganized.
    shutil.rmtree(images_folder)
    os.remove(annotations_file)
# SVIRO #################################################################
def download_sviro(data_dir):
    """Download the SVIRO grayscale classification subset into <data_dir>/sviro."""
    # Original URL: https://sviro.kl.dfki.de
    full_path = stage_path(data_dir, "sviro")
    archive_path = os.path.join(data_dir, "sviro_grayscale_rectangle_classification.zip")
    download_and_extract("https://sviro.kl.dfki.de/?wpdmdl=1731", archive_path)
    os.rename(os.path.join(data_dir, "SVIRO_DOMAINBED"), full_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Download datasets')
parser.add_argument('--data_dir', type=str, required=True)
args = parser.parse_args()
# download_mnist(args.data_dir)
# download_pacs(args.data_dir)
# download_office_home(args.data_dir)
# download_domain_net(args.data_dir)
# download_vlcs(args.data_dir)
process_terra_incognita(args.data_dir)
# download_sviro(args.data_dir)
# Camelyon17Dataset(root_dir=args.data_dir, download=True)
#FMoWDataset(root_dir=args.data_dir, download=True)
| 14,359 | 31.860412 | 109 | py |
aidgn | aidgn-main/domainbed/scripts/train.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import collections
import json
import os
import random
import sys
import time
import uuid
import numpy as np
import PIL
import torch
import torchvision
import torch.utils.data
from matplotlib import pyplot as plt
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import model_selection
from domainbed.lib import misc
from domainbed.lib.fast_data_loader import InfiniteDataLoader, FastDataLoader
from domainbed.lib.query import Q
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Domain generalization')
parser.add_argument('--data_dir', type=str)
parser.add_argument('--dataset', type=str, default="RotatedMNIST")
parser.add_argument('--algorithm', type=str, default="ERM")
parser.add_argument('--task', type=str, default="domain_generalization",
choices=["domain_generalization", "domain_adaptation"])
parser.add_argument('--hparams', type=str,
help='JSON-serialized hparams dict')
parser.add_argument('--hparams_seed', type=int, default=0,
help='Seed for random hparams (0 means "default hparams")')
parser.add_argument('--trial_seed', type=int, default=0,
help='Trial number (used for seeding split_dataset and '
'random_hparams).')
parser.add_argument('--seed', type=int, default=0,
help='Seed for everything else')
parser.add_argument('--steps', type=int, default=None,
help='Number of steps. Default is dataset-dependent.')
parser.add_argument('--checkpoint_freq', type=int, default=None,
help='Checkpoint every N steps. Default is dataset-dependent.')
parser.add_argument('--test_envs', type=int, nargs='+', default=[0])
parser.add_argument('--output_dir', type=str, default="train_output")
parser.add_argument('--holdout_fraction', type=float, default=0.2)
parser.add_argument('--uda_holdout_fraction', type=float, default=0,
help="For domain adaptation, % of test to use unlabeled for training.")
parser.add_argument('--skip_model_save', action='store_true')
parser.add_argument('--save_model_every_checkpoint', action='store_true')
parser.add_argument('--gpu', default='0', type=str)
parser.add_argument('--select_M', default=1, type=int)
args = parser.parse_args()
# If we ever want to implement checkpointing, just persist these values
# every once in a while, and then load them from disk here.
start_step = 0
algorithm_dict = None
os.makedirs(args.output_dir, exist_ok=True)
sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out.txt'))
sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err.txt'))
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
misc.seed_hash(args.hparams_seed, args.trial_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if args.dataset in vars(datasets):
dataset = vars(datasets)[args.dataset](args.data_dir,
args.test_envs, hparams)
else:
raise NotImplementedError
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
    # 'out-split' is used by the Oracle model selection method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
    # be discarded at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*args.holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if env_i in args.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*args.uda_holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
if args.task == "domain_adaptation" and len(uda_splits) == 0:
raise ValueError("Not enough unlabeled samples for domain adaptation.")
train_loaders = [InfiniteDataLoader(
dataset=env,
weights=env_weights,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(in_splits)
if i not in args.test_envs]
uda_loaders = [InfiniteDataLoader(
dataset=env,
weights=env_weights,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(uda_splits)
if i in args.test_envs]
eval_loaders = [FastDataLoader(
dataset=env,
batch_size=64,
num_workers=dataset.N_WORKERS)
for env, _ in (in_splits + out_splits + uda_splits)]
eval_weights = [None for _, weights in (in_splits + out_splits + uda_splits)]
eval_loader_names = ['env{}_in'.format(i)
for i in range(len(in_splits))]
eval_loader_names += ['env{}_out'.format(i)
for i in range(len(out_splits))]
eval_loader_names += ['env{}_uda'.format(i)
for i in range(len(uda_splits))]
algorithm_class = algorithms.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(dataset.input_shape, dataset.num_classes,
len(dataset) - len(args.test_envs), hparams)
if algorithm_dict is not None:
algorithm.load_state_dict(algorithm_dict)
algorithm.to(device)
train_minibatches_iterator = zip(*train_loaders)
uda_minibatches_iterator = zip(*uda_loaders)
checkpoint_vals = collections.defaultdict(lambda: [])
steps_per_epoch = min([len(env)/hparams['batch_size'] for env,_ in in_splits])
n_steps = args.steps or dataset.N_STEPS
checkpoint_freq = args.checkpoint_freq or dataset.CHECKPOINT_FREQ
    def save_checkpoint(filename):
        """Serialize the current algorithm state plus run metadata to
        ``args.output_dir/filename``; no-op when ``args.skip_model_save``."""
        if args.skip_model_save:
            return
        save_dict = {
            "args": vars(args),
            "model_input_shape": dataset.input_shape,
            "model_num_classes": dataset.num_classes,
            # Number of training domains = all envs minus held-out test envs.
            "model_num_domains": len(dataset) - len(args.test_envs),
            "model_hparams": hparams,
            # NOTE(review): .cpu() moves the live model to the CPU and it is
            # not moved back here -- confirm callers re-issue .to(device).
            "model_dict": algorithm.cpu().state_dict()
        }
        torch.save(save_dict, os.path.join(args.output_dir, filename))
max_val_acc = 0
training_val_max_model = algorithm.state_dict()
last_results_keys = None
for step in range(start_step, n_steps):
step_start_time = time.time()
if hparams['PAC']:
minibatches_device = [(x.to(device), y.to(device), z.to(device))
for x,y,z in next(train_minibatches_iterator)]
else:
minibatches_device = [(x.to(device), y.to(device), z.to(device))
for x,y,z in next(train_minibatches_iterator)]
if args.task == "domain_adaptation":
uda_device = [x.to(device)
for x,_ in next(uda_minibatches_iterator)]
else:
uda_device = None
step_vals = algorithm.update(step, minibatches_device, uda_device)
algorithm.lr_scheduler.step()
checkpoint_vals['step_time'].append(time.time() - step_start_time)
for key, val in step_vals.items():
checkpoint_vals[key].append(val)
if (step % checkpoint_freq == 0) or (step == n_steps - 1):
results = {
'step': step,
'epoch': step / steps_per_epoch,
}
for key, val in checkpoint_vals.items():
results[key] = np.mean(val)
evals = zip(eval_loader_names, eval_loaders, eval_weights)
for name, loader, weights in evals:
acc = misc.maccuracy(algorithm, loader, weights, device)
results[name+'_acc'] = acc
results_keys = sorted(results.keys())
if results_keys != last_results_keys:
misc.print_row(results_keys, colwidth=12)
last_results_keys = results_keys
misc.print_row([results[key] for key in results_keys],
colwidth=12)
results.update({
'hparams': hparams,
'args': vars(args)
})
epochs_path = os.path.join(args.output_dir, 'results.jsonl')
with open(epochs_path, 'a') as f:
f.write(json.dumps(results, sort_keys=True) + "\n")
algorithm_dict = algorithm.state_dict()
start_step = step + 1
checkpoint_vals = collections.defaultdict(lambda: [])
if args.save_model_every_checkpoint:
save_checkpoint(f'model_step{step}.pkl')
with open(os.path.join(args.output_dir, 'done'), 'w') as f:
f.write('done')
| 10,857 | 37.778571 | 82 | py |
aidgn | aidgn-main/domainbed/lib/tsne.py | import argparse
import os
import pickle
import numpy as np
import torch
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
# def plot_TNSE(X_2d_tr, tr_labels, label_target_names, filename):
# colors = ["red", "green", "blue", "black", "brown", "grey", "orange", "yellow", "pink", "cyan", "magenta"]
# fig = plt.figure(figsize=(16, 16))
# ax = plt.axes(projection='3d')
# for i, label in zip(range(len(label_target_names)), label_target_names):
# ax.scatter(X_2d_tr[tr_labels == i, 0], X_2d_tr[tr_labels == i, 1], X_2d_tr[tr_labels == i, 2], c=colors[i], marker=".", label=label)
#
# plt.savefig(filename)
def plot_TNSE_domain(X_2d_tr, tr_labels, label_target_names, filename):
    """Scatter-plot a 2-D t-SNE embedding colored by domain and save it.

    Args:
        X_2d_tr: (N, 2) array of t-SNE coordinates.
        tr_labels: (N,) integer domain index per point.
        label_target_names: legend name for each domain index.
        filename: path the figure is saved to.
    """
    colors = ["black", "green", "blue", "orange", "brown", "grey", "orange",
              "yellow", "pink", "cyan", "magenta"]
    plt.figure(figsize=(16, 16))
    plt.tick_params(labelsize=23)
    # The original `if i < 3` / `else` branches were byte-identical dead
    # duplication, so a single scatter call per domain is equivalent.
    for i, label in enumerate(label_target_names):
        mask = tr_labels == i
        plt.scatter(X_2d_tr[mask, 0], X_2d_tr[mask, 1],
                    c=colors[i], marker=".", label=label)
    plt.savefig(filename)
def plot_TNSE_class(X_2d_tr, tr_labels, label_target_names, filename):
    """Scatter-plot a 2-D t-SNE embedding colored by class label and save it."""
    colors = ["red", "green", "blue", "brown", "orange", "yellow", "cyan",
              "magenta", "brown", "grey"]
    plt.figure(figsize=(16, 16))
    plt.tick_params(labelsize=23)
    for idx, name in enumerate(label_target_names):
        sel = tr_labels == idx
        plt.scatter(X_2d_tr[sel, 0], X_2d_tr[sel, 1],
                    c=colors[idx], marker=".", label=name)
    plt.savefig(filename)
def unique(list1):
    """Return the distinct elements of *list1*, keeping first-seen order.

    Runs in O(n) using a set of already-seen values; the original scanned the
    output list per element (O(n^2)).  Assumes elements are hashable, which
    holds for the integer/scalar labels this helper is applied to here.
    """
    seen = set()
    unique_list = []
    for x in list1:
        if x not in seen:
            seen.add(x)
            unique_list.append(x)
    return unique_list
def tsne_plot(Z_out, labels, domain_labels, dir_name):
    """Embed *Z_out* with 2-D t-SNE and save class- and domain-colored plots
    into *dir_name*."""
    y = np.asarray(labels)
    d = np.asarray(domain_labels)
    class_names = unique(y)
    domain_names = unique(d)
    embedded = TSNE(n_components=2, init="pca").fit_transform(Z_out)
    plot_TNSE_class(embedded, y, class_names, dir_name + "Z_class_tSNE.png")
    plot_TNSE_domain(embedded, d, domain_names, dir_name + "Z_domain_tSNE.png")
if __name__ == "__main__":
    # Load cached feature/label pickles from --plotdir and render t-SNE plots.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--plotdir", help="Path to configuration file")
    bash_args = parser.parse_args()
    dir_name = bash_args.plotdir
    # Source-domain features, class labels and domain ids.
    with open(dir_name + "Z_out.pkl", "rb") as fp:
        Z_out = pickle.load(fp)
    with open(dir_name + "Y_out.pkl", "rb") as fp:
        Y_out = pickle.load(fp)
    with open(dir_name + "Y_domain_out.pkl", "rb") as fp:
        Y_domain_out = pickle.load(fp)
    # Held-out (target-domain) features, class labels and domain ids.
    with open(dir_name + "Z_test.pkl", "rb") as fp:
        Z_test = pickle.load(fp)
    with open(dir_name + "Y_test.pkl", "rb") as fp:
        Y_test = pickle.load(fp)
    with open(dir_name + "Y_domain_test.pkl", "rb") as fp:
        Y_domain_test = pickle.load(fp)
    # Change label of target domain from -1 to #source_domains + 1
    Y_domain_label = len(unique(Y_domain_out))
    for i in range(len(Y_domain_test)):
        Y_domain_test[i] = Y_domain_label
    # Merge source and target splits before plotting (assumes the pickles hold
    # Python lists, so += extends in place -- TODO confirm).
    Z_out += Z_test
    Y_out += Y_test
    Y_domain_out += Y_domain_test
    tsne_plot(Z_out, Y_out, Y_domain_out, dir_name)
aidgn | aidgn-main/domainbed/lib/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Things that don't belong anywhere else
"""
import hashlib
import json
import os
import sys
from shutil import copyfile
from collections import OrderedDict
from numbers import Number
import operator
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
import pickle
from collections import Counter
def make_weights_for_balanced_classes(dataset):
    """Return one sampling weight per example such that every class has equal
    total weight (each class sums to 1/n_classes, all weights sum to 1)."""
    labels = [int(y) for _, y in dataset]
    counts = Counter(labels)
    n_classes = len(counts)
    # Weight of one sample of class c: 1 / (count(c) * n_classes).
    per_class = {c: 1 / (n * n_classes) for c, n in counts.items()}
    weights = torch.zeros(len(dataset))
    for idx, c in enumerate(labels):
        weights[idx] = per_class[c]
    return weights
def pdb():
    """Drop into the interactive debugger, restoring the real stdout first
    (useful when stdout has been redirected, e.g. by ``Tee``)."""
    sys.stdout = sys.__stdout__
    # Local import: shadows this function's name only inside this scope.
    import pdb
    print("Launching PDB, enter 'n' to step to parent function.")
    pdb.set_trace()
def seed_hash(*args):
    """Deterministically map *args* to an integer in [0, 2**31) for use as a
    random seed."""
    digest = hashlib.md5(str(args).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2 ** 31)
def print_separator():
    """Print an 80-character divider line."""
    print("=" * 80)
def print_row(row, colwidth=10, latex=False):
    """Print *row* as one fixed-width table line; in latex mode cells are
    '&'-separated and a double-backslash terminator is appended."""
    sep, terminator = (" & ", "\\\\") if latex else (" ", "")

    def fmt(value):
        # Floats get a fixed 10-decimal rendering before padding/truncation.
        if np.issubdtype(type(value), np.floating):
            value = "{:.10f}".format(value)
        return str(value).ljust(colwidth)[:colwidth]

    # NOTE: the terminator is printed as a second positional argument (space
    # separated, newline-terminated), matching the original behavior.
    print(sep.join(fmt(v) for v in row), terminator)
class _SplitDataset(torch.utils.data.Dataset):
"""Used by split_dataset"""
def __init__(self, underlying_dataset, keys):
super(_SplitDataset, self).__init__()
self.underlying_dataset = underlying_dataset
self.keys = keys
def __getitem__(self, key):
return self.underlying_dataset[self.keys[key]]
def __len__(self):
return len(self.keys)
def split_dataset(dataset, n, seed=0):
    """Randomly split *dataset* into a pair of datasets: the first with *n*
    points, the second with the rest.  Deterministic for a fixed *seed*."""
    assert n <= len(dataset)
    order = list(range(len(dataset)))
    np.random.RandomState(seed).shuffle(order)
    return _SplitDataset(dataset, order[:n]), _SplitDataset(dataset, order[n:])
def random_pairs_of_minibatches(minibatches):
    """Pair each minibatch (in random order) with its successor in that order,
    truncating both members of a pair to the shorter batch length."""
    order = torch.randperm(len(minibatches)).tolist()
    pairs = []
    for idx, src in enumerate(order):
        dst = order[(idx + 1) % len(order)]  # wraps around to the first
        xi, yi = minibatches[src][0], minibatches[src][1]
        xj, yj = minibatches[dst][0], minibatches[dst][1]
        n = min(len(xi), len(xj))
        pairs.append(((xi[:n], yi[:n]), (xj[:n], yj[:n])))
    return pairs
def accuracy(network, loader, weights, device):
    """Weighted classification accuracy of *network* over *loader*.

    *weights* is a per-example weight vector aligned with the loader's
    iteration order, or None for uniform weighting.  The network is switched
    to eval mode for the duration of the computation and back afterwards.
    """
    correct, total = 0.0, 0.0
    offset = 0
    network.eval()
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            p = network.predict(x)
            if weights is None:
                batch_weights = torch.ones(len(x))
            else:
                batch_weights = weights[offset:offset + len(x)]
                offset += len(x)
            batch_weights = batch_weights.to(device)
            if p.size(1) == 1:
                # Single-logit binary head: positive logit means class 1.
                hits = p.gt(0).eq(y).float() * batch_weights.view(-1, 1)
            else:
                hits = p.argmax(1).eq(y).float() * batch_weights
            correct += hits.sum().item()
            total += batch_weights.sum().item()
    network.train()
    return correct / total
class Tee:
    """File-like object that mirrors every write to both the original stdout
    and a log file (flushing after each write)."""

    def __init__(self, fname, mode="a"):
        self.stdout = sys.stdout
        self.file = open(fname, mode)

    def write(self, message):
        for stream in (self.stdout, self.file):
            stream.write(message)
        self.flush()

    def flush(self):
        for stream in (self.stdout, self.file):
            stream.flush()
class ParamDict(OrderedDict):
    """Code adapted from https://github.com/Alok/rl_implementations/tree/master/reptile.
    A dictionary where the values are Tensors, meant to represent weights of
    a model. This subclass lets you perform arithmetic on weights directly."""

    def __init__(self, *args, **kwargs):
        # Bug fix: the original forwarded ``*kwargs`` (unpacking only the
        # keyword NAMES as positional args), which broke keyword construction
        # such as ParamDict(a=tensor).  ``**kwargs`` forwards them correctly.
        super().__init__(*args, **kwargs)

    def _prototype(self, other, op):
        """Apply *op* value-wise against a scalar or key-wise against a dict."""
        if isinstance(other, Number):
            return ParamDict({k: op(v, other) for k, v in self.items()})
        elif isinstance(other, dict):
            return ParamDict({k: op(self[k], other[k]) for k in self})
        else:
            raise NotImplementedError

    def __add__(self, other):
        return self._prototype(other, operator.add)

    def __rmul__(self, other):
        return self._prototype(other, operator.mul)

    __mul__ = __rmul__

    def __neg__(self):
        return ParamDict({k: -v for k, v in self.items()})

    def __rsub__(self, other):
        # a - b := a + (-b)
        return self.__add__(other.__neg__())

    __sub__ = __rsub__

    def __truediv__(self, other):
        return self._prototype(other, operator.truediv)
def maccuracy(network, loader, weights, device):
    """Weighted accuracy over a loader that yields ``(x, y, extra)`` triples
    (the third element is ignored); otherwise identical to ``accuracy``."""
    correct, total = 0.0, 0.0
    offset = 0
    network.eval()
    with torch.no_grad():
        for x, y, _ in loader:
            x, y = x.to(device), y.to(device)
            p = network.predict(x)
            if weights is None:
                batch_weights = torch.ones(len(x))
            else:
                batch_weights = weights[offset:offset + len(x)]
                offset += len(x)
            batch_weights = batch_weights.to(device)
            if p.size(1) == 1:
                # Single-logit binary head: positive logit means class 1.
                hits = p.gt(0).eq(y).float() * batch_weights.view(-1, 1)
            else:
                hits = p.argmax(1).eq(y).float() * batch_weights
            correct += hits.sum().item()
            total += batch_weights.sum().item()
    network.train()
    return correct / total
def get_feat_label(network, loader, device):
    """Run ``network.featurizer`` over *loader* and return stacked
    ``(features, labels)`` tensors (or ``(None, None)`` for an empty loader).

    Performance fix: batches are accumulated in Python lists and concatenated
    once at the end; the original called ``torch.cat`` on the growing
    accumulator every iteration, re-copying all previous data (O(n^2)).
    """
    feats, labels = [], []
    network.eval()
    with torch.no_grad():
        for x, y, _ in loader:
            x = x.to(device)
            y = y.to(device)
            feats.append(network.featurizer(x))
            labels.append(y)
    network.train()
    if not feats:
        # Matches the original's behavior when the loader yields nothing.
        return None, None
    return torch.cat(feats, dim=0), torch.cat(labels, dim=0)
def knn_accuracy(network, loader, device, source_feats, source_labels, K=1):
    """K-nearest-neighbour accuracy: each featurized query from *loader* is
    classified by majority vote among its K closest (cosine distance) samples
    in the source feature bank."""
    correct = 0
    total = 0
    source_labels = source_labels.view(-1)
    network.eval()
    with torch.no_grad():
        for x, y, _ in loader:
            x = x.to(device)
            y = y.to(device)
            feats = network.featurizer(x)
            # Indices of the K most similar source samples per query
            # (argsort is ascending, and smaller distance = more similar).
            neighbours = cosine_matrix(feats, source_feats).argsort()[:, :K]
            for query_idx in range(x.size(0)):
                total += 1
                votes = source_labels[neighbours[query_idx]]
                if torch.bincount(votes).argmax() == y[query_idx]:
                    correct += 1
    network.train()
    return correct / total
def cosine_matrix(x, y):
    """Pairwise cosine distance (1 - cosine similarity); result has shape
    ``(x.size(0), y.size(0))``."""
    x_unit = F.normalize(x, dim=1)
    y_unit = F.normalize(y, dim=1)
    # Broadcast rows of x against rows of y and reduce over the feature dim.
    similarity = torch.sum(x_unit.unsqueeze(1) * y_unit.unsqueeze(0), 2)
    return 1 - similarity
| 7,663 | 28.251908 | 91 | py |
aidgn | aidgn-main/domainbed/lib/wide_resnet.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
From https://github.com/meliketoy/wide-resnet.pytorch
"""
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=True)
def conv_init(m):
    """Weight-init hook for ``Module.apply``: Xavier-uniform for conv layers,
    constant (weight=1, bias=0) for batch-norm layers; others untouched."""
    name = m.__class__.__name__
    if "Conv" in name:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif "BatchNorm" in name:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class wide_basic(nn.Module):
    """Pre-activation wide-resnet basic block: BN-ReLU-Conv, dropout,
    BN-ReLU-Conv(stride), plus a 1x1 projection shortcut when the channel
    count or spatial size changes."""

    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(wide_basic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1,
                               bias=True)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=True)
        # Identity shortcut unless shapes change, then a strided 1x1 conv.
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride,
                          bias=True))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        return out + self.shortcut(x)
class Wide_ResNet(nn.Module):
    """Wide Resnet with the softmax layer chopped off"""

    def __init__(self, input_shape, depth, widen_factor, dropout_rate):
        super(Wide_ResNet, self).__init__()
        assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
        blocks_per_stage = (depth - 4) / 6
        k = widen_factor
        widths = [16, 16 * k, 32 * k, 64 * k]
        # _wide_layer mutates in_planes as it chains blocks together.
        self.in_planes = 16
        self.conv1 = conv3x3(input_shape[0], widths[0])
        self.layer1 = self._wide_layer(wide_basic, widths[1], blocks_per_stage,
                                       dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, widths[2], blocks_per_stage,
                                       dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, widths[3], blocks_per_stage,
                                       dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(widths[3], momentum=0.9)
        self.n_outputs = widths[3]

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1] * (int(num_blocks) - 1)
        blocks = []
        for s in strides:
            blocks.append(block(self.in_planes, planes, dropout_rate, s))
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer3(self.layer2(self.layer1(out)))
        out = F.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        # The 8x8 pool leaves a 1x1 map; index it away to get (N, n_outputs).
        return out[:, :, 0, 0]
| 3,242 | 29.885714 | 79 | py |
aidgn | aidgn-main/domainbed/lib/fast_data_loader.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
class _InfiniteSampler(torch.utils.data.Sampler):
"""Wraps another Sampler to yield an infinite stream."""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
for batch in self.sampler:
yield batch
class InfiniteDataLoader:
    """Endlessly yields batches from *dataset*: weighted sampling with
    replacement when a per-example *weights* vector is given, uniform with
    replacement otherwise."""

    def __init__(self, dataset, weights, batch_size, num_workers):
        super().__init__()
        # Bug fix: the original tested ``weights == None``, which triggers
        # elementwise comparison when *weights* is a tensor; identity with
        # ``is`` is the correct None check.  The original also re-assigned
        # ``weights`` to a ones-tensor afterwards -- dead code, removed.
        if weights is not None:
            sampler = torch.utils.data.WeightedRandomSampler(
                weights, replacement=True, num_samples=batch_size)
        else:
            sampler = torch.utils.data.RandomSampler(dataset, replacement=True)
        batch_sampler = torch.utils.data.BatchSampler(
            sampler,
            batch_size=batch_size,
            drop_last=True)
        self._infinite_iterator = iter(torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=_InfiniteSampler(batch_sampler)
        ))

    def __iter__(self):
        while True:
            yield next(self._infinite_iterator)

    def __len__(self):
        # Infinite stream: length is undefined by design.
        raise ValueError
class FastDataLoader:
    """DataLoader wrapper with slightly improved speed by not respawning worker
    processes at every epoch."""

    def __init__(self, dataset, batch_size, num_workers):
        super().__init__()
        batch_sampler = torch.utils.data.BatchSampler(
            torch.utils.data.RandomSampler(dataset, replacement=False),
            batch_size=batch_size,
            drop_last=False)
        self._length = len(batch_sampler)
        self._infinite_iterator = iter(torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=_InfiniteSampler(batch_sampler)))

    def __len__(self):
        return self._length

    def __iter__(self):
        # Draw exactly one epoch's worth of batches from the infinite stream.
        for _ in range(self._length):
            yield next(self._infinite_iterator)
| 2,156 | 28.148649 | 79 | py |
mff | mff-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'MFF'
copyright = '2018, Claudio Zeni, Aldo Glielmo, Adam Fekete, Alessandro De Vita'
author = 'Claudio Zeni, Aldo Glielmo, Adam Fekete, Alessandro De Vita'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
napoleon_google_docstring = True
# napoleon_use_param = False
# napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
add_module_names = True
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
modindex_common_prefix = ['mff', 'models']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'canonical_url': '',
'analytics_id': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 2,
'includehidden': True,
'titles_only': False
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/mff_logo_2.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
html_show_copyright = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MFF'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mff.tex', 'mff Documentation',
'Aldo, Claudio, Adam', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mff', 'mff Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mff', 'mff Documentation',
author, 'mff', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration ------------------------------------------------- | 6,686 | 28.588496 | 79 | py |
T-Concord3D | T-Concord3D-master/test.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import os
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from tqdm import tqdm
import math
from utils.metric_util import per_class_iu, fast_hist_crop, fast_ups_crop
from dataloader.pc_dataset import get_label_name, get_label_inv_name, update_config
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
import torch.nn.functional as F
from utils.load_save_util import load_checkpoint
from utils.ups import enable_dropout
import warnings
from torch.nn.parallel import DistributedDataParallel
warnings.filterwarnings("ignore")
def save_predictions_sematicKitti(predict_labels_serialized, predict_prob_serialized, path_to_seq_folder,
                                  path_to_seq_folder_prob, sample_name, challenge=False):
    """Dump per-point SemanticKITTI predictions (and, unless *challenge*,
    their probabilities) as raw ``.label`` binaries via ``ndarray.tofile``.

    Robustness fix: the label directory is now created if missing -- the
    original only created the probability directory, so a fresh output root
    made the label dump crash.  Paths are built with ``os.path.join``.
    """
    os.makedirs(path_to_seq_folder, exist_ok=True)
    predict_labels_serialized.tofile(
        os.path.join(path_to_seq_folder, sample_name + '.label'))
    if not challenge:
        os.makedirs(path_to_seq_folder_prob, exist_ok=True)
        predict_prob_serialized.tofile(
            os.path.join(path_to_seq_folder_prob, sample_name + '.label'))
def save_predictions_wod(predict_labels_serialized, predict_prob_serialized, path_to_seq_folder,
                         path_to_seq_folder_prob, sample_name, challenge=False):
    """Write WOD per-point predictions (and probabilities, unless *challenge*)
    to ``.npy`` files named after *sample_name*."""
    # Predicted labels are always written.
    np.save(os.path.join(path_to_seq_folder, sample_name), predict_labels_serialized)
    if challenge:
        # Challenge submissions skip the probability dump.
        return
    if not os.path.exists(path_to_seq_folder_prob):
        os.makedirs(path_to_seq_folder_prob)
    np.save(os.path.join(path_to_seq_folder_prob, sample_name), predict_prob_serialized)
def main(args):
os.environ['OMP_NUM_THREADS'] = "1"
distributed = False
if "WORLD_SIZE" in os.environ:
distributed = int(os.environ["WORLD_SIZE"]) > 1
print(f"distributed: {distributed}")
pytorch_device = args.local_rank
if distributed:
torch.cuda.set_device(pytorch_device)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
config_path = args.config_path
configs = load_config_data(config_path)
if args.mode == 'infer' or args.mode == 'val' or args.mode == 'test':
configs['train_params']['ssl'] = False
# send config parameters to pc_dataset
update_config(configs)
dataset_config = configs['dataset_params']
dataset_type = 'SemanticKITTI' if 'SemKITTI_sk_multiscan' == dataset_config['pc_dataset_type'] else 'WOD'
train_dataloader_config = configs['train_data_loader']
ssl_dataloader_config = configs['ssl_data_loader']
val_dataloader_config = configs['val_data_loader']
test_dataloader_config = configs['test_data_loader']
val_batch_size = val_dataloader_config['batch_size']
train_batch_size = train_dataloader_config['batch_size']
ssl_batch_size = ssl_dataloader_config['batch_size']
test_batch_size = test_dataloader_config['batch_size']
model_config = configs['model_params']
train_hypers = configs['train_params']
past_frame = train_hypers['past']
future_frame = train_hypers['future']
T_past_frame = train_hypers['T_past']
T_future_frame = train_hypers['T_future']
grid_size = model_config['output_shape']
num_class = model_config['num_class']
ignore_label = dataset_config['ignore_label']
model_load_path = train_hypers['model_load_path']
model_save_path = train_hypers['model_save_path']
SemKITTI_label_name = get_label_name(dataset_config["label_mapping"])
unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]
print(unique_label_str)
SemKITTI_learningmap_inv = get_label_inv_name(dataset_config["label_mapping"])
model = model_builder.build(model_config).to(pytorch_device)
print(f"model_load_path: {model_load_path}")
if os.path.exists(model_load_path):
model = load_checkpoint(model_load_path, model, map_location=pytorch_device)
print(f" loading model_load_path: {model_load_path}")
# if args.mgpus:
# my_model = nn.DataParallel(my_model)
# #my_model.cuda()
# #my_model.cuda()
if distributed:
model = DistributedDataParallel(
model,
device_ids=[pytorch_device],
output_device=args.local_rank,
find_unused_parameters=True
)
optimizer = optim.Adam(model.parameters(), lr=train_hypers["learning_rate"])
loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
num_class=num_class, ignore_label=ignore_label)
train_dataset_loader, val_dataset_loader, test_dataset_loader, ssl_dataset_loader = data_builder.build(
dataset_config,
train_dataloader_config,
val_dataloader_config,
test_dataloader_config,
ssl_dataloader_config,
grid_size=grid_size,
train_hypers=train_hypers)
# test and validation
if args.mode == 'val':
dataset_loader = val_dataset_loader
batch_size = val_batch_size
path_to_save_predicted_labels = val_dataloader_config['data_path'] # "val_result"
elif args.mode == 'test':
dataset_loader = test_dataset_loader
batch_size = test_batch_size
path_to_save_predicted_labels = test_dataloader_config['data_path'] # "test_result"
elif args.mode == 'infer':
dataset_loader = ssl_dataset_loader
batch_size = ssl_batch_size
path_to_save_predicted_labels = ssl_dataloader_config['data_path'] # "pseudo_label_result"
# mode to eval
model.eval()
# if uncertainty is used, enable dropout
if args.ups:
# enable dropout (mc)
enable_dropout(model)
# sample forward pass
f_pass = 10
with torch.no_grad():
ups_hist = []
hist_list = []
hist_list_op = []
ups_count = []
        def validation_inference(vox_label, grid, pt_labs, pt_fea, ref_st_idx=None, ref_end_idx=None, lcw=None):
            """Forward one batch, then score it against GT (val mode) and/or
            serialize per-point predictions to disk (when args.save).

            This is a closure over main()'s state: it reads `model`,
            `batch_size`, `args`, `f_pass`, `dataset_loader`, the accumulator
            lists and the loop variable `i_iter_val` from the enclosing scope.
            """
            val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in pt_fea]
            val_grid_ten = [torch.from_numpy(i).to(pytorch_device) for i in grid]
            if args.ups:
                # MC-dropout: average f_pass stochastic passes; the std-dev of
                # the winning class is used as the uncertainty estimate.
                ups_out_prob = []
                for _ in range(f_pass):
                    predict_labels_raw = model(val_pt_fea_ten, val_grid_ten, batch_size)
                    ups_out_prob.append(F.softmax(predict_labels_raw, dim=1))  # for selecting positive pseudo-labels
                ups_out_prob = torch.stack(ups_out_prob)
                out_std = torch.std(ups_out_prob, dim=0)
                predict_probablity = torch.mean(ups_out_prob, dim=0)
                predict_labels = torch.argmax(predict_probablity, dim=1)
                # keep dimension during finding maximum
                predict_prob_max, predict_prob_ind = torch.max(predict_probablity, dim=1, keepdim=True)
                # squeeze (remove the 1 size from the tensor)
                predict_prob_max = torch.squeeze(predict_prob_max)
                # get the uncertainty of the most probable prediction
                max_std = out_std.gather(1, predict_prob_ind)
                max_std = torch.squeeze(max_std)
            else:
                # single deterministic pass
                predict_labels_raw = model(val_pt_fea_ten, val_grid_ten, batch_size)
                predict_labels = torch.argmax(predict_labels_raw, dim=1)
                predict_probablity = torch.nn.functional.softmax(predict_labels_raw, dim=1)
                predict_prob_max, predict_prob_ind = predict_probablity.max(dim=1)
            # move to cpu and detach to convert to numpy
            predict_labels = predict_labels.cpu().detach().numpy()
            predict_probabilitys = predict_prob_max.cpu().detach().numpy()
            if args.ups:
                model_uncertintys = max_std.cpu().detach().numpy()
            for count, i_val_grid in enumerate(grid):
                # Gather per-point values from the dense voxel grid via the
                # per-point voxel coordinates.
                if args.save_raw:
                    predict_raw = predict_labels_raw[count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
                predict_label = predict_labels[count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
                predict_prob = predict_probabilitys[count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
                if args.ups:
                    model_uncertainty = model_uncertintys[
                        count, grid[count][:, 0], grid[count][:, 1], grid[count][:, 2]]
                    model_uncertainty_serialized = np.array(model_uncertainty, dtype=np.float32)
                predict_labels_serialized = np.array(predict_label, dtype=np.int32)
                predict_prob_serialized = np.array(predict_prob, dtype=np.float32)
                if args.save_raw:
                    predict_raw_serialized = np.array(predict_raw, dtype=np.float32)
                demo_pt_labs = pt_labs[count]
                # get reference frame start and end index
                # NOTE(review): ref_st_idx is indexed unconditionally here even
                # though the guard just below allows it to be None — confirm
                # callers always pass it.
                st_id, end_id = int(ref_st_idx[count]), int(ref_end_idx[count])
                # only select the reference frame points
                if ref_st_idx is not None:
                    predict_labels_serialized = predict_labels_serialized[st_id:st_id + end_id]
                    predict_prob_serialized = predict_prob_serialized[st_id:st_id + end_id]
                    if args.save_raw:
                        predict_raw_serialized = predict_raw_serialized[st_id:st_id + end_id]
                    demo_pt_labs = demo_pt_labs[st_id:st_id + end_id]
                    if args.ups:
                        model_uncertainty_serialized = model_uncertainty_serialized[st_id:st_id + end_id]
                if args.mode == 'val':
                    # accumulate confusion-matrix crops for the mIoU report
                    hist_list.append(fast_hist_crop(predict_labels_serialized, demo_pt_labs,
                                                    unique_label))
                    if args.ups:
                        tmp_hist, temp_count = fast_ups_crop(model_uncertainty_serialized, demo_pt_labs.flatten(),
                                                             unique_label)
                        ups_hist.append(tmp_hist)
                        ups_count.append(temp_count)
                if args.save:
                    # convert the prediction into corresponding GT labels (inverse mapping)
                    predict_labels_serialized = np.vectorize(SemKITTI_learningmap_inv.__getitem__)(predict_labels_serialized)
                    # Frame name is the 6 digits before the file extension;
                    # sequence is the third-from-last path component.
                    sample_name = dataset_loader.dataset.point_cloud_dataset.im_idx[i_iter_val * batch_size + count][
                                  -10:-4]
                    sequence_num = dataset_loader.dataset.point_cloud_dataset.im_idx[i_iter_val * batch_size + count].split('/')[-3]
                    # create destination paths for predictions / probabilities
                    path_to_seq_folder = os.path.join(path_to_save_predicted_labels, str(sequence_num),
                                                      f"predictions_f{T_past_frame}_{T_future_frame}")
                    path_to_seq_folder_prob = os.path.join(path_to_save_predicted_labels, str(sequence_num),
                                                           f"probability_f{T_past_frame}_{T_future_frame}")
                    if args.save_raw:
                        path_to_seq_folder_raw = os.path.join(path_to_save_predicted_labels, str(sequence_num),
                                                              f"raw_f{T_past_frame}_{T_future_frame}")
                    if args.challenge:
                        # benchmark-submission layout overrides the default one
                        path_to_save_test_predicted_labels = args.challenge_path
                        path_to_seq_folder = os.path.join(path_to_save_test_predicted_labels,
                                                          f"f{T_past_frame}_{T_future_frame}", "sequences",
                                                          str(sequence_num),
                                                          "predictions")
                    if not os.path.exists(path_to_seq_folder):
                        os.makedirs(path_to_seq_folder)
                    # dump predictions and probability
                    # NOTE(review): labels are written here AND inside the
                    # dataset-specific saver below — possibly redundant.
                    predict_labels_serialized.tofile(path_to_seq_folder + '/' + sample_name + '.label')
                    if dataset_type == 'SemanticKITTI':
                        save_predictions_sematicKitti(predict_labels_serialized, predict_prob_serialized,
                                                      path_to_seq_folder, path_to_seq_folder_prob, sample_name, challenge=args.challenge)
                    elif dataset_type == 'WOD':
                        save_predictions_wod(predict_labels_serialized, predict_prob_serialized,
                                             path_to_seq_folder, path_to_seq_folder_prob, sample_name, challenge=args.challenge)
                    else:
                        raise Exception(f'{dataset_type} dataset type not known')
# Validation with multi-frames and ssl:
# if past_frame > 0 and train_hypers['ssl']:
        # Iterate the chosen split; the tqdm total is derived from the number
        # of point-cloud files rather than len(dataset_loader).
        for i_iter_val, (_, vox_label, grid, pt_labs, pt_fea, ref_st_idx, ref_end_idx, lcw) in tqdm(
                enumerate(dataset_loader),
                total=math.ceil(len(dataset_loader.dataset.point_cloud_dataset.im_idx) / batch_size)):
            # run inference (and accumulate/save) for this batch
            validation_inference(vox_label, grid, pt_labs, pt_fea, ref_st_idx=ref_st_idx, ref_end_idx=ref_end_idx,
                                 lcw=lcw)
        # print the validation per-class IoU and overall mIoU
        if args.mode == 'val':
            iou = per_class_iu(sum(hist_list))
            print('Validation per class iou: ')
            for class_name, class_iou in zip(unique_label_str, iou):
                print('%s : %.2f%%' % (class_name, class_iou * 100))
            val_miou = np.nanmean(iou) * 100
            print('Current val miou is %.3f' % val_miou)
            if args.ups:
                # mean per-class uncertainty histogram (20 classes)
                uncertainty_hist = np.sum(ups_hist, axis=0) / np.sum(ups_count, axis=0)
                plt.bar(range(20), uncertainty_hist, width=0.4)
                plt.show()
                print(uncertainty_hist)
if __name__ == '__main__':
    # CLI entry point: build the parser, echo the exact invocation for
    # reproducibility, then hand control to main().
    cli = argparse.ArgumentParser(description='')
    cli.add_argument('-y', '--config_path',
                     default='config/semantickitti/semantickitti_S0_0_T11_33_ssl_s20_p80.yaml')
    cli.add_argument('-g', '--mgpus', action='store_true', default=False)
    cli.add_argument('-m', '--mode', default='val')
    cli.add_argument('-s', '--save', default=True)
    cli.add_argument('-c', '--challenge', default=False)
    cli.add_argument('-p', '--challenge_path',
                     default='/mnt/personal/gebreawe/Datasets/RealWorld/semantic-kitti/challenge')
    cli.add_argument('-u', '--ups', default=False)
    cli.add_argument("--local_rank", default=0, type=int)
    cli.add_argument('-r', '--save_raw', default=False)
    parsed = cli.parse_args()
    print(' '.join(sys.argv))
    print(parsed)
    main(parsed)
| 16,515 | 44.750693 | 137 | py |
T-Concord3D | T-Concord3D-master/train_tconcord3d.py | # -*- coding:utf-8 -*-
# author: Awet
import argparse
import os
import sys
import time
import warnings
import numpy as np
import torch
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
from dataloader.pc_dataset import get_label_name, update_config
from utils.load_save_util import load_checkpoint
from utils.metric_util import per_class_iu, fast_hist_crop
from utils.trainer_function import Trainer
import copy
warnings.filterwarnings("ignore")
# clear/empty cached memory used by caching allocator
torch.cuda.empty_cache()
# NOTE(review): memory_summary() returns a report string that is discarded
# here — presumably a debugging leftover.
torch.cuda.memory_summary(device=None, abbreviated=False)
# Global training-progress counters, mutated inside main() via `global`.
epoch = 0
best_val_miou = 0
global_iter = 0
def main(args):
    """Entry point: set up (optionally distributed) training and run Trainer.

    Reads the YAML config at ``args.config_path``, builds the model, losses
    and dataloaders, then delegates the train/validate loop to
    ``utils.trainer_function.Trainer``.

    Args:
        args: parsed argparse namespace with ``config_path``, ``mgpus`` and
            ``local_rank``.
    """
    os.environ['OMP_NUM_THREADS'] = "1"
    # Distributed mode is detected from the torch.distributed launcher's
    # WORLD_SIZE environment variable.
    distributed = False
    if "WORLD_SIZE" in os.environ:
        distributed = int(os.environ["WORLD_SIZE"]) > 1
    print(f"distributed: {distributed}")

    pytorch_device = args.local_rank

    if distributed:
        torch.cuda.set_device(pytorch_device)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()

    config_path = args.config_path
    configs = load_config_data(config_path)
    # send config parameters to pc_dataset
    update_config(configs)

    dataset_config = configs['dataset_params']
    train_dataloader_config = configs['train_data_loader']
    val_dataloader_config = configs['val_data_loader']
    ssl_dataloader_config = configs['ssl_data_loader']

    source_val_batch_size = val_dataloader_config['batch_size']
    source_train_batch_size = train_dataloader_config['batch_size']
    target_train_batch_size = ssl_dataloader_config['batch_size']

    model_config = configs['model_params']
    train_hypers = configs['train_params']
    past_frame = train_hypers['past']
    future_frame = train_hypers['future']
    ssl = train_hypers['ssl']

    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']

    # BUG FIX: the original assigned model_load_path and then immediately
    # overwrote the same variable with model_save_path, so pretrained weights
    # configured via 'model_load_path' were never loaded (and a run could
    # silently resume from its own save path instead). Keep the two paths
    # separate: load from one, save checkpoints to the other.
    model_load_path = train_hypers['model_load_path']
    model_save_path = train_hypers['model_save_path']

    SemKITTI_label_name = get_label_name(dataset_config["label_mapping"])
    # NB: no ignored class
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]

    model = model_builder.build(model_config).to(pytorch_device)
    if os.path.exists(model_load_path):
        model = load_checkpoint(model_load_path, model, map_location=pytorch_device)

    if distributed:
        model = DistributedDataParallel(
            model,
            device_ids=[pytorch_device],
            output_device=args.local_rank,
            find_unused_parameters=False  # True
        )

    # Build the loss pair; in SSL mode the losses accept per-point lcw weights.
    if ssl:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=False, ssl=True, fl=False)
    else:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=False, fl=False)

    source_train_dataset_loader, source_val_dataset_loader, _, target_train_dataset_loader = data_builder.build(
        dataset_config,
        train_dataloader_config,
        val_dataloader_config,
        ssl_dataloader_config=ssl_dataloader_config,
        grid_size=grid_size,
        train_hypers=train_hypers)

    optimizer = optim.Adam(model.parameters(), lr=train_hypers["learning_rate"])

    print("|-------------------------Training started-----------------------------------------|")
    # Trainer owns the epoch loop, validation and checkpointing; checkpoints
    # are written to model_save_path (never read from it here).
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        ckpt_dir=model_save_path,
        unique_label=unique_label,
        unique_label_str=unique_label_str,
        lovasz_softmax=lovasz_softmax,
        loss_func=loss_func,
        ignore_label=ignore_label,
        train_mode="ema",
        ssl=ssl,
        eval_frequency=1,
        pytorch_device=pytorch_device,
        warmup_epoch=5,
        ema_frequency=1)

    trainer.fit(train_hypers["max_num_epochs"],
                source_train_dataset_loader,
                source_train_batch_size,
                source_val_dataset_loader,
                source_val_batch_size,
                test_loader=None,
                ckpt_save_interval=1,
                lr_scheduler_each_iter=False)
if __name__ == '__main__':
    # CLI entry point: parse options, echo the invocation, then train.
    cli = argparse.ArgumentParser(description='')
    cli.add_argument('-y', '--config_path',
                     default='config/semantickitti/nuscenes_T3_3.yaml')
    cli.add_argument('-g', '--mgpus', action='store_true', default=False)
    cli.add_argument("--local_rank", default=0, type=int)
    parsed = cli.parse_args()
    print(' '.join(sys.argv))
    print(parsed)
    main(parsed)
T-Concord3D | T-Concord3D-master/train.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import os
import time
import argparse
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from utils.metric_util import per_class_iu, fast_hist_crop
from dataloader.pc_dataset import get_label_name, update_config
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
from utils.load_save_util import load_checkpoint
import warnings
from torch.nn.parallel import DistributedDataParallel
warnings.filterwarnings("ignore")
# Global training-progress state, mutated inside main() via `global`.
epoch = 0
best_val_miou = 0
global_iter = 0
def main(args):
    """Supervised / semi-supervised training loop for the cylinder model.

    Reads the YAML config at ``args.config_path``, builds model, losses and
    dataloaders, then runs an epoch loop that validates (from epoch 1 on),
    checkpoints on mIoU improvement, and trains one pass over the train set.
    Uses the module-level ``epoch`` / ``best_val_miou`` / ``global_iter``
    counters via ``global``.
    """
    os.environ['OMP_NUM_THREADS'] = "1"
    # Distributed mode is detected from the launcher's WORLD_SIZE variable.
    distributed = False
    if "WORLD_SIZE" in os.environ:
        distributed = int(os.environ["WORLD_SIZE"]) > 1
    print(f"distributed: {distributed}")
    pytorch_device = args.local_rank
    if distributed:
        torch.cuda.set_device(pytorch_device)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        args.world_size = torch.distributed.get_world_size()
    config_path = args.config_path
    configs = load_config_data(config_path)
    # send config parameters to pc_dataset
    update_config(configs)
    dataset_config = configs['dataset_params']
    train_dataloader_config = configs['train_data_loader']
    val_dataloader_config = configs['val_data_loader']
    ssl_dataloader_config = configs['ssl_data_loader']
    val_batch_size = val_dataloader_config['batch_size']
    train_batch_size = train_dataloader_config['batch_size']
    model_config = configs['model_params']
    train_hypers = configs['train_params']
    past_frame = train_hypers['past']
    future_frame = train_hypers['future']
    ssl = train_hypers['ssl']
    grid_size = model_config['output_shape']
    num_class = model_config['num_class']
    ignore_label = dataset_config['ignore_label']
    model_load_path = train_hypers['model_load_path']
    model_save_path = train_hypers['model_save_path']
    SemKITTI_label_name = get_label_name(dataset_config["label_mapping"])
    # Drop the ignored class and shift ids to 0-based model classes.
    unique_label = np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str = [SemKITTI_label_name[x] for x in unique_label + 1]
    my_model = model_builder.build(model_config).to(pytorch_device)
    if os.path.exists(model_load_path):
        my_model = load_checkpoint(model_load_path, my_model, map_location=pytorch_device)
    if distributed:
        my_model = DistributedDataParallel(
            my_model,
            device_ids=[pytorch_device],
            output_device=args.local_rank,
            find_unused_parameters=True
        )
    # for weighted class loss
    weighted_class = False
    # for focal loss
    focal_loss = False  # True
    # Inverse-frequency style weights for the 20 training classes; only used
    # when focal or weighted-CE loss is enabled below.
    class_weights = np.array([1.40014903e+00, 1.10968683e+00, 5.06321920e+02, 9.19710291e+01,
                              1.76627589e+01, 1.58902791e+01, 1.49002594e+02, 6.12058299e+02,
                              1.75137027e+03, 2.47504075e-01, 3.25237847e+00, 3.62211985e-01,
                              8.77872638e+00, 4.08248861e-01, 9.97997655e-01, 1.91585640e-01,
                              7.21493239e+00, 4.68076958e-01, 1.69628483e+01, 6.35032127e+01], dtype=np.float32)
    per_class_weight = None
    if focal_loss or weighted_class:
        per_class_weight = torch.from_numpy(class_weights).to(pytorch_device)
    optimizer = optim.Adam(my_model.parameters(), lr=train_hypers["learning_rate"])
    # In SSL mode the losses additionally accept per-point lcw weights.
    if ssl:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=per_class_weight, ssl=True, fl=focal_loss)
    else:
        loss_func, lovasz_softmax = loss_builder.build(wce=True, lovasz=True,
                                                       num_class=num_class, ignore_label=ignore_label,
                                                       weights=per_class_weight, fl=focal_loss)
    train_dataset_loader, val_dataset_loader, _, _ = data_builder.build(dataset_config,
                                                                        train_dataloader_config,
                                                                        val_dataloader_config,
                                                                        ssl_dataloader_config=ssl_dataloader_config,
                                                                        grid_size=grid_size,
                                                                        train_hypers=train_hypers)
    class_count = np.zeros(20)
    my_model.train()
    check_iter = train_hypers['eval_every_n_steps']
    global global_iter, best_val_miou, epoch
    print("|-------------------------Training started-----------------------------------------|")
    print(f"focal_loss:{focal_loss}, weighted_cross_entropy: {weighted_class}")
    while epoch < train_hypers['max_num_epochs']:
        print(f"epoch: {epoch}")
        loss_list = []
        pbar = tqdm(total=len(train_dataset_loader))
        time.sleep(5)

        def valideting(hist_list, val_loss_list, val_vox_label, val_grid, val_pt_labs, val_pt_fea, ref_st_idx=None,
                       ref_end_idx=None, lcw=None):
            """Score one validation batch; appends to hist_list/val_loss_list
            and returns them. Closure over my_model, losses and config."""
            val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in val_pt_fea]
            val_grid_ten = [torch.from_numpy(i).to(pytorch_device) for i in val_grid]
            val_label_tensor = val_vox_label.type(torch.LongTensor).to(pytorch_device)
            predict_labels = my_model(val_pt_fea_ten, val_grid_ten, val_batch_size)
            inp = val_label_tensor.size(0)
            # TODO: check if this is correctly implemented
            # hack for batch_size mismatch with the number of training example
            predict_labels = predict_labels[:inp, :, :, :, :]
            if ssl:
                # lcw = per-point pseudo-label confidence weights
                lcw_tensor = torch.FloatTensor(lcw).to(pytorch_device)
                loss = lovasz_softmax(torch.nn.functional.softmax(predict_labels).detach(), val_label_tensor,
                                      ignore=ignore_label, lcw=lcw_tensor) + loss_func(predict_labels.detach(),
                                                                                       val_label_tensor, lcw=lcw_tensor)
            else:
                loss = lovasz_softmax(torch.nn.functional.softmax(predict_labels).detach(), val_label_tensor,
                                      ignore=ignore_label) + loss_func(predict_labels.detach(), val_label_tensor)
            predict_labels = torch.argmax(predict_labels, dim=1)
            predict_labels = predict_labels.cpu().detach().numpy()
            for count, i_val_grid in enumerate(val_grid):
                # crop each sample's predictions back to its occupied voxels
                hist_list.append(fast_hist_crop(predict_labels[
                                                    count, val_grid[count][:, 0], val_grid[count][:, 1],
                                                    val_grid[count][:, 2]], val_pt_labs[count],
                                                unique_label))
            val_loss_list.append(loss.detach().cpu().numpy())
            return hist_list, val_loss_list

        # Validate (and maybe checkpoint) before each epoch except the first.
        if epoch >= 1:
            my_model.eval()
            hist_list = []
            val_loss_list = []
            with torch.no_grad():
                for i_iter_val, (_, vox_label, grid, pt_labs, pt_fea, ref_st_idx, ref_end_idx, val_lcw) \
                        in enumerate(val_dataset_loader):
                    hist_list, val_loss_list = valideting(hist_list, val_loss_list, vox_label, grid, pt_labs,
                                                          pt_fea, ref_st_idx=ref_st_idx,
                                                          ref_end_idx=ref_end_idx,
                                                          lcw=val_lcw)
            print(f"--------------- epoch: {epoch} ----------------")
            iou = per_class_iu(sum(hist_list))
            print('Validation per class iou: ')
            for class_name, class_iou in zip(unique_label_str, iou):
                print('%s : %.2f%%' % (class_name, class_iou * 100))
            val_miou = np.nanmean(iou) * 100
            # save model if performance is improved
            if best_val_miou < val_miou:
                best_val_miou = val_miou
                # NOTE(review): the checkpoint directory is derived from the
                # second-to-last path component and created relative to the
                # working directory — confirm this matches model_save_path.
                if not os.path.exists(model_save_path.split('/')[-2]):
                    os.mkdir(os.path.join(model_save_path.split('/')[-2]))
                torch.save(my_model.state_dict(), model_save_path)
            print(f"Current val miou is {np.round(val_miou, 2)} while the best val miou is "
                  f"{np.round(best_val_miou, 2)}")
            print(f"Current val loss is {np.round(np.mean(val_loss_list), 2)}")

        def training(i_iter_train, train_vox_label, train_grid, pt_labels, train_pt_fea, ref_st_idx=None,
                     ref_end_idx=None, lcw=None):
            """One optimizer step on a training batch. Updates the module-level
            global_iter counter and the enclosing loss_list / pbar."""
            global global_iter, best_val_miou, epoch
            train_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in train_pt_fea]
            train_vox_ten = [torch.from_numpy(i).to(pytorch_device) for i in train_grid]
            point_label_tensor = train_vox_label.type(torch.LongTensor).to(pytorch_device)
            # forward + backward + optimize
            outputs = my_model(train_pt_fea_ten, train_vox_ten, train_batch_size)
            inp = point_label_tensor.size(0)
            # TODO: check if this is correctly implemented
            # hack for batch_size mismatch with the number of training example
            outputs = outputs[:inp, :, :, :, :]
            if ssl:
                lcw_tensor = torch.FloatTensor(lcw).to(pytorch_device)
                loss = lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor, ignore=ignore_label,
                                      lcw=lcw_tensor) + loss_func(
                    outputs, point_label_tensor, lcw=lcw_tensor)
            else:
                loss = lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor,
                                      ignore=ignore_label) + loss_func(
                    outputs, point_label_tensor)
            # TODO: check --> to mitigate only one element tensors can be converted to Python scalars
            loss = loss.mean()
            loss.backward()
            optimizer.step()
            loss_list.append(loss.item())
            if global_iter % 1000 == 0:
                if len(loss_list) > 0:
                    print('epoch %d iter %5d, loss: %.3f\n' %
                          (epoch, i_iter_train, np.mean(loss_list)))
                else:
                    print('loss error')
            # gradients are cleared after the step, before the next batch
            optimizer.zero_grad()
            global_iter += 1
            if global_iter % 100 == 0:
                pbar.update(100)
            if global_iter % check_iter == 0:
                if len(loss_list) > 0:
                    print('epoch %d iter %5d, loss: %.3f\n' %
                          (epoch, i_iter_train, np.mean(loss_list)))
                else:
                    print('loss error')

        my_model.train()
        # one pass over the training set
        for i_iter_train, (_, vox_label, grid, pt_labs, pt_fea, ref_st_idx, ref_end_idx, lcw) in enumerate(
                train_dataset_loader):
            training(i_iter_train, vox_label, grid, pt_labs, pt_fea, ref_st_idx=ref_st_idx, ref_end_idx=ref_end_idx,
                     lcw=lcw)
        pbar.close()
        epoch += 1
if __name__ == '__main__':
    # CLI entry point: parse options, echo the invocation, then train.
    cli = argparse.ArgumentParser(description='')
    cli.add_argument('-y', '--config_path',
                     default='config/semantickitti/nuscenes_S0_0_T11_33_ssl_s20_p80.yaml')
    cli.add_argument('-g', '--mgpus', action='store_true', default=False)
    cli.add_argument("--local_rank", default=0, type=int)
    parsed = cli.parse_args()
    print(' '.join(sys.argv))
    print(parsed)
    main(parsed)
| 13,060 | 42.105611 | 120 | py |
T-Concord3D | T-Concord3D-master/builder/loss_builder.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import torch
from utils.lovasz_losses import lovasz_softmax, lovasz_softmax_lcw, cross_entropy_lcw
from utils.loss_func import FocalLoss
def build(wce=True, lovasz=True, num_class=20, ignore_label=None, weights=None, ssl=False, fl=False):
    """Build the training loss function(s).

    Args:
        wce: include a point-wise (cross-entropy / focal) loss.
        lovasz: include the lovasz-softmax loss.
        num_class: number of classes (unused here; kept for API parity).
        ignore_label: label id excluded from the loss.
        weights: per-class weight tensor for focal loss.
        ssl: use the lcw-weighted loss variants for semi-supervised training.
        fl: use focal loss instead of plain cross-entropy.

    Returns:
        ``(point_loss, lovasz_loss)`` when both ``wce`` and ``lovasz`` are
        requested, otherwise the single requested loss callable.

    Raises:
        NotImplementedError: if neither ``wce`` nor ``lovasz`` is requested.
    """
    # BUG FIX: the original `elif wce and not lovasz` branches returned the
    # boolean flag `wce` itself (i.e. the value True) instead of a loss
    # callable; they now return the appropriate loss function.
    # focal loss and semi-supervised learning
    if ssl and fl:
        if wce and lovasz:
            return FocalLoss(weight=weights, ignore_index=ignore_label), lovasz_softmax_lcw
        elif wce:
            return FocalLoss(weight=weights, ignore_index=ignore_label)
        elif lovasz:
            return lovasz_softmax_lcw
    # only semi-supervised learning
    if ssl:
        if wce and lovasz:
            return cross_entropy_lcw, lovasz_softmax_lcw
        elif wce:
            return cross_entropy_lcw
        elif lovasz:
            return lovasz_softmax_lcw
    # fully supervised: focal loss or plain cross-entropy on GT labels
    if fl:
        loss_funs = FocalLoss(weight=weights, ignore_index=ignore_label)
    else:
        loss_funs = torch.nn.CrossEntropyLoss(ignore_index=ignore_label)
    if wce and lovasz:
        return loss_funs, lovasz_softmax
    elif wce:
        return loss_funs
    elif lovasz:
        return lovasz_softmax
    else:
        raise NotImplementedError
| 1,319 | 30.428571 | 101 | py |
T-Concord3D | T-Concord3D-master/builder/data_builder.py | # -*- coding:utf-8 -*-
import torch
from dataloader.dataset_semantickitti import get_model_class, collate_fn_BEV, collate_fn_BEV_tta
from dataloader.pc_dataset import get_pc_model_class
def build(dataset_config,
          train_dataloader_config,
          val_dataloader_config,
          test_dataloader_config=None,
          ssl_dataloader_config=None,
          grid_size=[480, 360, 32], use_tta=False, train_hypers=None):
    """Construct the train/val/test/ssl datasets and their DataLoaders.

    Returns:
        (train_loader, val_loader, test_loader, ssl_loader); the test/ssl
        entries are None when the corresponding config is not supplied.

    NOTE(review): ``grid_size`` uses a mutable list as its default value —
    callers must not mutate it in place.
    """
    train_data_path = train_dataloader_config["data_path"]
    train_imageset = train_dataloader_config["imageset"]
    val_data_path = val_dataloader_config["data_path"]
    val_imageset = val_dataloader_config["imageset"]
    train_ref = train_dataloader_config["return_ref"]
    val_ref = val_dataloader_config["return_ref"]
    if test_dataloader_config is not None:
        test_data_path = test_dataloader_config["data_path"]
        test_imageset = test_dataloader_config["imageset"]
        test_ref = test_dataloader_config["return_ref"]
    # ssl data path for Semi-Supervised training
    ssl_data_path = None
    if ssl_dataloader_config is not None:
        ssl_data_path = ssl_dataloader_config["data_path"]
        ssl_imageset = ssl_dataloader_config["imageset"]
        ssl_ref = ssl_dataloader_config["return_ref"]
    label_mapping = dataset_config["label_mapping"]
    SemKITTI = get_pc_model_class(dataset_config['pc_dataset_type'])
    # NuScenes needs its own metadata object loaded from the train data root.
    nusc = None
    if "nusc" in dataset_config['pc_dataset_type']:
        from nuscenes import NuScenes
        nusc = NuScenes(version='v1.0-trainval', dataroot=train_data_path, verbose=True)
    # In SSL mode the ssl data path is handed to the train set (so it can read
    # pseudo-labels); otherwise the train set gets no ssl path.
    if train_hypers and ssl_dataloader_config and train_hypers['ssl']:
        train_pt_dataset = SemKITTI(train_data_path, imageset=train_imageset,
                                    return_ref=train_ref, label_mapping=label_mapping,
                                    train_hypers=train_hypers, ssl_data_path=ssl_data_path)
    else:
        train_pt_dataset = SemKITTI(train_data_path, imageset=train_imageset,
                                    return_ref=train_ref, label_mapping=label_mapping,
                                    train_hypers=train_hypers, ssl_data_path=None)
    val_pt_dataset = SemKITTI(val_data_path, imageset=val_imageset,
                              return_ref=val_ref, label_mapping=label_mapping, train_hypers=train_hypers)
    if test_dataloader_config is not None:
        test_pt_dataset = SemKITTI(test_data_path, imageset=test_imageset,
                                   return_ref=test_ref, label_mapping=label_mapping, train_hypers=train_hypers)
    if ssl_dataloader_config is not None:
        ssl_pt_dataset = SemKITTI(ssl_data_path, imageset=ssl_imageset,
                                  return_ref=ssl_ref, label_mapping=label_mapping, train_hypers=train_hypers)
    # Training set gets the full augmentation suite (flip/rotate/scale/translate).
    train_dataset = get_model_class(dataset_config['dataset_type'])(
        train_pt_dataset,
        grid_size=grid_size,
        flip_aug=True,
        fixed_volume_space=dataset_config['fixed_volume_space'],
        max_volume_space=dataset_config['max_volume_space'],
        min_volume_space=dataset_config['min_volume_space'],
        ignore_label=dataset_config["ignore_label"],
        rotate_aug=True,
        scale_aug=True,
        transform_aug=True
    )
    # Validation: with test-time augmentation the set is augmented and paired
    # with the TTA collate function; otherwise it is deterministic.
    if use_tta:
        val_dataset = get_model_class(dataset_config['dataset_type'])(
            val_pt_dataset,
            grid_size=grid_size,
            fixed_volume_space=dataset_config['fixed_volume_space'],
            max_volume_space=dataset_config['max_volume_space'],
            min_volume_space=dataset_config['min_volume_space'],
            ignore_label=dataset_config["ignore_label"],
            rotate_aug=True,
            scale_aug=True,
            return_test=True,
            use_tta=True
        )
        collate_fn_BEV_tmp = collate_fn_BEV_tta
    else:
        val_dataset = get_model_class(dataset_config['dataset_type'])(
            val_pt_dataset,
            grid_size=grid_size,
            fixed_volume_space=dataset_config['fixed_volume_space'],
            max_volume_space=dataset_config['max_volume_space'],
            min_volume_space=dataset_config['min_volume_space'],
            ignore_label=dataset_config["ignore_label"],
        )
        collate_fn_BEV_tmp = collate_fn_BEV
    # Test set mirrors the validation TTA choice.
    if use_tta:
        if test_dataloader_config is not None:
            test_dataset = get_model_class(dataset_config['dataset_type'])(
                test_pt_dataset,
                grid_size=grid_size,
                fixed_volume_space=dataset_config['fixed_volume_space'],
                max_volume_space=dataset_config['max_volume_space'],
                min_volume_space=dataset_config['min_volume_space'],
                ignore_label=dataset_config["ignore_label"],
                rotate_aug=True,
                scale_aug=True,
                return_test=True,
                use_tta=True
            )
            collate_fn_BEV_tmp = collate_fn_BEV_tta
    else:
        if test_dataloader_config is not None:
            test_dataset = get_model_class(dataset_config['dataset_type'])(
                test_pt_dataset,
                grid_size=grid_size,
                fixed_volume_space=dataset_config['fixed_volume_space'],
                max_volume_space=dataset_config['max_volume_space'],
                min_volume_space=dataset_config['min_volume_space'],
                ignore_label=dataset_config["ignore_label"],
            )
            collate_fn_BEV_tmp = collate_fn_BEV
    # SSL (pseudo-label) set is never augmented.
    if ssl_dataloader_config is not None:
        ssl_dataset = get_model_class(dataset_config['dataset_type'])(
            ssl_pt_dataset,
            grid_size=grid_size,
            fixed_volume_space=dataset_config['fixed_volume_space'],
            max_volume_space=dataset_config['max_volume_space'],
            min_volume_space=dataset_config['min_volume_space'],
            ignore_label=dataset_config["ignore_label"],
        )
    train_dataset_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                       batch_size=train_dataloader_config["batch_size"],
                                                       collate_fn=collate_fn_BEV,
                                                       shuffle=train_dataloader_config["shuffle"],
                                                       num_workers=train_dataloader_config["num_workers"])
    val_dataset_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                                     batch_size=val_dataloader_config["batch_size"],
                                                     collate_fn=collate_fn_BEV_tmp,
                                                     shuffle=val_dataloader_config["shuffle"],
                                                     num_workers=val_dataloader_config["num_workers"])
    test_dataset_loader = None
    if test_dataloader_config is not None:
        test_dataset_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                          batch_size=test_dataloader_config["batch_size"],
                                                          collate_fn=collate_fn_BEV_tmp,
                                                          shuffle=test_dataloader_config["shuffle"],
                                                          num_workers=test_dataloader_config["num_workers"])
    ssl_dataset_loader = None
    if ssl_dataloader_config is not None:
        ssl_dataset_loader = torch.utils.data.DataLoader(dataset=ssl_dataset,
                                                         batch_size=ssl_dataloader_config["batch_size"],
                                                         collate_fn=collate_fn_BEV,
                                                         shuffle=ssl_dataloader_config["shuffle"],
                                                         num_workers=ssl_dataloader_config["num_workers"])
    return train_dataset_loader, val_dataset_loader, test_dataset_loader, ssl_dataset_loader
| 8,112 | 48.469512 | 111 | py |
T-Concord3D | T-Concord3D-master/utils/trainer_function.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# at 8/10/22
# --------------------------|
import argparse
import os
import sys
import time
import warnings
import numpy as np
import torch
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel
from tqdm import tqdm
from builder import data_builder, model_builder, loss_builder
from config.config import load_config_data
from dataloader.pc_dataset import get_label_name, update_config
from utils.load_save_util import load_checkpoint
from utils.metric_util import per_class_iu, fast_hist_crop
import copy
def yield_target_dataset_loader(n_epochs, target_train_dataset_loader):
    """Re-iterate the target-domain loader for ``n_epochs`` full passes.

    Each yielded item is the 6-tuple ``(vox_label, grid, pt_fea, ref_st_idx,
    ref_end_idx, lcw)`` extracted from the loader's 8-element batches (the
    batch index-like first field and the point labels are dropped).
    """
    for _ in range(n_epochs):
        for batch in target_train_dataset_loader:
            _, vox_label, grid, _, pt_fea, start_idx, end_idx, lcw = batch
            yield vox_label, grid, pt_fea, start_idx, end_idx, lcw
class Trainer(object):
    """Supervised training / validation driver for a 3D segmentation model.

    The loss combines a lovasz-softmax term with an additional pointwise
    loss (``loss_func``); when ``ssl`` is truthy both terms are weighted per
    point by ``lcw`` (a label-confidence weight produced by the dataloader).
    The model state dict is saved to ``ckpt_dir`` whenever validation mIoU
    improves.
    """
    def __init__(self,
                 model,
                 optimizer,
                 ckpt_dir,
                 unique_label,
                 unique_label_str,
                 lovasz_softmax,
                 loss_func,
                 ignore_label,
                 train_mode=None,
                 ssl=None,
                 eval_frequency=1,
                 pytorch_device=0,
                 warmup_epoch=1,
                 ema_frequency=5):
        self.model = model
        self.optimizer = optimizer
        self.model_save_path = ckpt_dir
        self.unique_label = unique_label
        self.unique_label_str = unique_label_str
        # NOTE(review): eval_frequency is stored but fit() validates every epoch.
        self.eval_frequency = eval_frequency
        self.lovasz_softmax = lovasz_softmax
        self.loss_func = loss_func
        self.ignore_label = ignore_label
        self.train_mode = train_mode
        self.ssl = ssl
        self.pytorch_device = pytorch_device
        # warmup_epoch / ema_frequency are stored but not used in this class.
        self.warmup_epoch = warmup_epoch
        self.ema_frequency = ema_frequency
        self.val = False
        # NOTE(review): fit() tracks the best mIoU in a local variable, so this
        # attribute stays 0 after construction.
        self.best_val_miou = 0
        # the progress bar / loss print fires once every `progress_value` iterations
        self.progress_value = 100
    def criterion(self, outputs, point_label_tensor, lcw=None):
        """Combined lovasz-softmax + pointwise loss.

        When ``self.ssl`` is truthy, ``lcw`` is converted to a float tensor on
        the training device and passed to both loss terms as a per-point weight.
        """
        if self.ssl:
            lcw_tensor = torch.FloatTensor(lcw).to(self.pytorch_device)
            # NOTE(review): softmax is called without an explicit dim; PyTorch
            # warns and picks a default — confirm dim=1 is intended.
            loss = self.lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor,
                                       ignore=self.ignore_label, lcw=lcw_tensor) \
                   + self.loss_func(outputs, point_label_tensor, lcw=lcw_tensor)
        else:
            loss = self.lovasz_softmax(torch.nn.functional.softmax(outputs), point_label_tensor,
                                       ignore=self.ignore_label) \
                   + self.loss_func(outputs, point_label_tensor)
        return loss
    def validate(self, my_model, val_dataset_loader, val_batch_size, test_loader=None, ssl=None):
        """Run inference over the validation loader.

        Returns ``(hist_list, val_loss_list)``: per-sample cropped confusion
        matrices for IoU computation, and a loss list that is currently always
        empty (the loss computation below is commented out).
        """
        hist_list = []
        val_loss_list = []
        my_model.eval()
        with torch.no_grad():
            for i_iter_val, (
                    _, val_vox_label, val_grid, val_pt_labs, val_pt_fea, ref_st_idx, ref_end_idx, lcw) in enumerate(
                val_dataset_loader):
                val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(self.pytorch_device) for i in
                                  val_pt_fea]
                val_grid_ten = [torch.from_numpy(i).to(self.pytorch_device) for i in val_grid]
                val_label_tensor = val_vox_label.type(torch.LongTensor).to(self.pytorch_device)
                predict_labels = my_model(val_pt_fea_ten, val_grid_ten, val_batch_size)
                # TODO: check if this is correctly implemented
                # hack for batch_size mismatch with the number of training example:
                # drop padded predictions beyond the actual number of labels
                inp = val_label_tensor.size(0)
                predict_labels = predict_labels[:inp, :, :, :, :]
                # loss = self.criterion(predict_labels, val_label_tensor, lcw)
                # per-voxel argmax over the class dimension
                predict_labels = torch.argmax(predict_labels, dim=1)
                predict_labels = predict_labels.cpu().detach().numpy()
                for count, i_val_grid in enumerate(val_grid):
                    # gather the prediction at each point's voxel coordinate and
                    # accumulate a confusion matrix against the point labels
                    hist_list.append(fast_hist_crop(predict_labels[
                                                        count, val_grid[count][:, 0], val_grid[count][:, 1],
                                                        val_grid[count][:, 2]], val_pt_labs[count],
                                                    self.unique_label))
                # val_loss_list.append(loss.detach().cpu().numpy())
        return hist_list, val_loss_list
    def fit(self, n_epochs, source_train_dataset_loader, train_batch_size, val_dataset_loader,
            val_batch_size, test_loader=None, ckpt_save_interval=1, lr_scheduler_each_iter=False):
        """Train for ``n_epochs`` epochs, validating after every epoch and
        checkpointing on mIoU improvement.

        NOTE(review): ckpt_save_interval and lr_scheduler_each_iter are
        accepted but not used in this implementation.
        """
        global_iter = 1
        best_val_miou = 0
        for epoch in range(n_epochs):
            # NOTE(review): the per-epoch progress bar is never closed.
            pbar = tqdm(total=len(source_train_dataset_loader))
            # train the model
            loss_list = []
            self.model.train()
            # training with multi-frames and ssl:
            for i_iter_train, (
                    _, train_vox_label, train_grid, _, train_pt_fea, ref_st_idx, ref_end_idx, lcw) in enumerate(
                source_train_dataset_loader):
                # move per-point features and voxel coordinates to the device
                train_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(self.pytorch_device) for i in
                                    train_pt_fea]
                train_vox_ten = [torch.from_numpy(i).to(self.pytorch_device) for i in train_grid]
                point_label_tensor = train_vox_label.type(torch.LongTensor).to(self.pytorch_device)
                # forward + backward + optimize
                outputs = self.model(train_pt_fea_ten, train_vox_ten, train_batch_size)
                inp = point_label_tensor.size(0)
                # TODO: check if this is correctly implemented
                # hack for batch_size mismatch with the number of training example
                outputs = outputs[:inp, :, :, :, :]
                ################################
                loss = self.criterion(outputs, point_label_tensor, lcw)
                # mean() collapses any non-scalar loss so .item() below is valid
                loss = loss.mean()
                loss.backward()
                self.optimizer.step()
                # gradients are cleared here for the next iteration
                self.optimizer.zero_grad()
                # Uncomment to use the learning rate scheduler
                # scheduler.step()
                loss_list.append(loss.item())
                if global_iter % self.progress_value == 0:
                    pbar.update(self.progress_value)
                    if len(loss_list) > 0:
                        print('epoch %d iter %5d, loss: %.3f\n' % (epoch, i_iter_train, np.mean(loss_list)))
                    else:
                        print('loss error')
                global_iter += 1
            # ----------------------------------------------------------------------#
            # Evaluation/validation
            with torch.no_grad():
                hist_list, val_loss_list = self.validate(self.model, val_dataset_loader, val_batch_size,
                                                         test_loader, self.ssl)
            # ----------------------------------------------------------------------#
            # Print validation mIoU and Loss
            print(f"--------------- epoch: {epoch} ----------------")
            iou = per_class_iu(sum(hist_list))
            print('Validation per class iou: ')
            for class_name, class_iou in zip(self.unique_label_str, iou):
                print('%s : %.2f%%' % (class_name, class_iou * 100))
            val_miou = np.nanmean(iou) * 100
            # save model if performance is improved
            if best_val_miou < val_miou:
                best_val_miou = val_miou
                torch.save(self.model.state_dict(), self.model_save_path)
            print('Current val miou is %.3f while the best val miou is %.3f' %
                  (val_miou, best_val_miou))
| 8,482 | 42.953368 | 116 | py |
T-Concord3D | T-Concord3D-master/utils/load_save_util.py | # -*- coding:utf-8 -*-
import torch
def load_checkpoint(model_load_path, model, map_location=None):
    """Load weights from ``model_load_path`` into ``model`` (in place).

    Keys carrying a ``module.`` DataParallel prefix are stripped, and only
    parameters whose (stripped) name exists in the target model with an
    identical shape are copied; everything else is counted and reported.
    ``map_location``, when given, is a CUDA device index.
    Returns the (mutated) model.
    """
    target_state = model.state_dict()
    if map_location is not None:
        source_state = torch.load(model_load_path, map_location=f'cuda:{map_location}')
    else:
        source_state = torch.load(model_load_path)
    matched = {}
    match_size = 0
    nomatch_size = 0
    for key in source_state.keys():
        value = source_state[key]
        name = key[7:] if key[:7] == 'module.' else key
        if name in target_state and target_state[name].shape == value.shape:
            match_size += 1
            matched[name] = value
        else:
            nomatch_size += 1
    print("matched parameter sets: {}, and no matched: {}".format(match_size, nomatch_size))
    target_state.update(matched)
    model.load_state_dict(target_state)
    return model
def load_checkpoint_1b1(model_load_path, model):
    """Load a checkpoint by matching parameters one-by-one *positionally*.

    The i-th tensor of the checkpoint is copied into the i-th entry of the
    model's state dict whenever their shapes agree, regardless of key names.
    Mismatched pairs have both key names printed. Returns the mutated model.
    """
    target_state = model.state_dict()
    source_state = torch.load(model_load_path)
    part_load = {}
    match_size = 0
    nomatch_size = 0
    source_keys = [*source_state]
    target_keys = [*target_state]
    for idx in range(len(source_keys)):
        src_key = source_keys[idx]
        dst_key = target_keys[idx]
        if target_state[dst_key].shape == source_state[src_key].shape:
            match_size += 1
            part_load[dst_key] = source_state[src_key]
        else:
            print(src_key)
            print(dst_key)
            nomatch_size += 1
    print("matched parameter sets: {}, and no matched: {}".format(match_size, nomatch_size))
    target_state.update(part_load)
    model.load_state_dict(target_state)
    return model
| 1,772 | 26.703125 | 92 | py |
T-Concord3D | T-Concord3D-master/utils/lovasz_losses.py | # -*- coding:utf-8 -*-
# author: Xinge
"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
"""
from __future__ import print_function, division
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
def lovasz_grad(gt_sorted):
    """Gradient of the Lovasz extension w.r.t. sorted errors.

    See Alg. 1 of the Lovasz-Softmax paper. ``gt_sorted`` is the binary
    ground truth reordered by decreasing error.
    """
    num = len(gt_sorted)
    total_fg = gt_sorted.sum()
    cum_fg = gt_sorted.float().cumsum(0)
    cum_bg = (1 - gt_sorted).float().cumsum(0)
    jaccard = 1. - (total_fg - cum_fg) / (total_fg + cum_bg)
    if num > 1:  # cover the 1-pixel case
        jaccard[1:num] = jaccard[1:num] - jaccard[0:-1]
    return jaccard
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
    """IoU of the foreground class for binary masks (1 = fg, 0 = bg).

    Returns a percentage; an image whose union is empty scores ``EMPTY``.
    With ``per_image`` False the inputs are treated as one single "image".
    """
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        inter = ((label == 1) & (pred == 1)).sum()
        union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
        ious.append(float(inter) / float(union) if union else EMPTY)
    # mean across images when per_image is set
    return 100 * mean(ious)
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
    """Array of IoU percentages, one per (non-ignored) class."""
    if not per_image:
        preds, labels = (preds,), (labels,)
    ious = []
    for pred, label in zip(preds, labels):
        per_class = []
        for c in range(C):
            if c == ignore:
                # the ignored label is sometimes among predicted classes
                continue
            inter = ((label == c) & (pred == c)).sum()
            union = ((label == c) | ((pred == c) & (label != ignore))).sum()
            per_class.append(float(inter) / float(union) if union else EMPTY)
        ious.append(per_class)
    # mean across images when per_image is set
    ious = [mean(vals) for vals in zip(*ious)]
    return 100 * np.array(ious)
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
    """Binary Lovasz hinge loss.

    logits: [B, H, W] raw scores; labels: [B, H, W] binary masks (0 or 1).
    ``per_image`` averages the loss over individual images; ``ignore`` is the
    void label dropped before the loss.
    """
    if per_image:
        per_img = (lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
                   for log, lab in zip(logits, labels))
        return mean(per_img)
    return lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss on flattened predictions.

    logits: [P] raw scores; labels: [P] binary ground truth (0 or 1).
    """
    if len(labels) == 0:
        # only void pixels: return a zero that still carries a gradient
        return logits.sum() * 0.
    signs = 2. * labels.float() - 1.
    margins = 1. - logits * Variable(signs)
    margins_sorted, order = torch.sort(margins, dim=0, descending=True)
    gt_sorted = labels[order.data]
    grad = lovasz_grad(gt_sorted)
    return torch.dot(F.relu(margins_sorted), Variable(grad))
def flatten_binary_scores(scores, labels, ignore=None):
    """Flatten scores/labels to 1-D and drop positions labelled ``ignore``."""
    scores = scores.view(-1)
    labels = labels.view(-1)
    if ignore is None:
        return scores, labels
    keep = labels != ignore
    return scores[keep], labels[keep]
class StableBCELoss(torch.nn.modules.Module):
    """Numerically stable binary cross-entropy on raw logits.

    Uses the max(x, 0) - x*t + log(1 + exp(-|x|)) identity so that large
    positive or negative logits cannot overflow exp().
    """
    def __init__(self):
        super(StableBCELoss, self).__init__()

    def forward(self, input, target):
        relu_part = input.clamp(min=0)
        log_part = (1 + (- input.abs()).exp()).log()
        elementwise = relu_part - input * target + log_part
        return elementwise.mean()
def binary_xloss(logits, labels, ignore=None):
    """Binary cross-entropy on flattened logits, skipping ``ignore`` pixels.

    logits: [B, H, W] raw scores; labels: [B, H, W] binary masks (0 or 1).
    """
    flat_logits, flat_labels = flatten_binary_scores(logits, labels, ignore)
    return StableBCELoss()(flat_logits, Variable(flat_labels.float()))
# --------------------------- MULTICLASS LOSSES ---------------------------
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
    """Multi-class Lovasz-Softmax loss.

    probas: [B, C, H, W] class probabilities in [0, 1] (size [B, H, W] is
    interpreted as sigmoid output). labels: [B, H, W] integer ground truth.
    classes: 'all', 'present', or an explicit list of class ids to average.
    ``per_image`` computes the loss image-by-image; ``ignore`` is the void label.
    """
    if per_image:
        per_img = (
            lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
                                classes=classes)
            for prob, lab in zip(probas, labels))
        return mean(per_img)
    return lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
def lovasz_softmax_flat(probas, labels, classes='present'):
    """Multi-class Lovasz-Softmax loss on flattened predictions.

    probas: [P, C] class probabilities in [0, 1].
    labels: [P] ground-truth labels in [0, C-1].
    classes: 'all' for all classes, 'present' for classes present in the
    labels, or an explicit list of class ids to average over.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground mask for class c
        # BUGFIX: was `classes is 'present'` -- identity comparison against a
        # string literal relies on CPython interning (SyntaxWarning on 3.8+).
        if classes == 'present' and fg.sum() == 0:
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)
def flatten_probas(probas, labels, ignore=None):
    """Flatten batched predictions to [P, C] and labels to [P].

    3-D input is treated as sigmoid output and given a singleton channel;
    5-D input (3D segmentation) folds depth into the last spatial axis.
    Positions labelled ``ignore`` are dropped from both outputs.
    """
    if probas.dim() == 3:
        # assumes output of a sigmoid layer: [B, H, W] -> [B, 1, H, W]
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
    elif probas.dim() == 5:
        # 3D segmentation: [B, C, L, H, W] -> [B, C, L, H*W]
        B, C, L, H, W = probas.size()
        probas = probas.contiguous().view(B, C, L, H * W)
    B, C, H, W = probas.size()
    # [B, C, H, W] -> [B*H*W, C]
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    labels = labels.view(-1)
    if ignore is None:
        return probas, labels
    valid = labels != ignore
    return probas[valid.nonzero().squeeze()], labels[valid]
#--------------------------------------------------------------------------------------------------------------------#
#---------------------- segmentation confidence probability waited loss function---------------------#
def cross_entropy_lcw(probas, labels, ignore_label=0, weights=None, lcw=None):
    """Cross-entropy weighted per point by a label-confidence weight (lcw).

    probas: [N, C, ...] raw logits; labels: [N, ...] integer targets;
    lcw: per-point confidence that scales the unreduced CE loss.
    ``weights`` is accepted for interface compatibility but unused.
    Raises ValueError when ``lcw`` is missing.

    NOTE(review): the weight is applied unnormalized — the original code
    computed lcw/100 but never used it. Confirm the intended scale.
    """
    raw_loss_funs = torch.nn.CrossEntropyLoss(ignore_index=ignore_label, reduction='none')
    if lcw is None:
        # BUGFIX: raising a plain string is a TypeError in Python 3; raise a
        # proper exception instead.
        raise ValueError("error: per label weight is none")
    raw_loss = raw_loss_funs(probas, labels)
    weighted_loss = (raw_loss * lcw).mean()
    return weighted_loss
def lovasz_softmax_lcw(probas, labels, classes='present', per_image=False, ignore=None, lcw=None):
    """Multi-class Lovasz-Softmax loss with per-point confidence weighting.

    probas: [B, C, H, W] class probabilities in [0, 1]. labels: [B, H, W]
    integer ground truth. classes: 'all', 'present', or an explicit list of
    class ids. ``lcw`` is the per-point confidence forwarded to the flat loss;
    ``ignore`` is the void label.
    """
    if per_image:
        per_img = (
            lovasz_softmax_flat_lcw(
                *flatten_probas_lcw(prob.unsqueeze(0), lab.unsqueeze(0), ignore, lcw),
                classes=classes)
            for prob, lab in zip(probas, labels))
        return mean(per_img)
    return lovasz_softmax_flat_lcw(*flatten_probas_lcw(probas, labels, ignore, lcw), classes=classes)
def lovasz_softmax_flat_lcw(probas, labels, lcw=None, classes='present'):
    """Multi-class Lovasz-Softmax loss with per-point confidence weighting.

    probas: [P, C] class probabilities in [0, 1].
    labels: [P] ground-truth labels in [0, C-1].
    lcw: [P] per-point confidence, interpreted as a percentage (divided by 100).
    classes: 'all', 'present', or an explicit list of class ids to average.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.
    C = probas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        fg = (labels == c).float()  # foreground mask for class c
        # BUGFIX: was `classes is 'present'` -- identity comparison against a
        # string literal relies on CPython interning (SyntaxWarning on 3.8+).
        if classes == 'present' and fg.sum() == 0:
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (Variable(fg) - class_pred).abs()
        # scale each point's error by its normalized confidence
        norm_lcw = (lcw / 100.0)
        errors *= norm_lcw
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        perm = perm.data
        fg_sorted = fg[perm]
        losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
    return mean(losses)
def flatten_probas_lcw(probas, labels, ignore=None, lcw=None):
    """Flatten batched predictions to [P, C], labels and lcw to [P].

    Same reshaping as ``flatten_probas`` (3-D treated as sigmoid output,
    5-D folds depth into the last spatial axis); the per-point confidence
    ``lcw`` is flattened and filtered alongside the labels.
    """
    if probas.dim() == 3:
        # assumes output of a sigmoid layer: [B, H, W] -> [B, 1, H, W]
        B, H, W = probas.size()
        probas = probas.view(B, 1, H, W)
    elif probas.dim() == 5:
        # 3D segmentation: [B, C, L, H, W] -> [B, C, L, H*W]
        B, C, L, H, W = probas.size()
        probas = probas.contiguous().view(B, C, L, H * W)
    B, C, H, W = probas.size()
    # [B, C, H, W] -> [B*H*W, C]
    probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
    labels = labels.view(-1)
    lcw = lcw.view(-1)
    if ignore is None:
        return probas, labels, lcw
    valid = labels != ignore
    return probas[valid.nonzero().squeeze()], labels[valid], lcw[valid]
#---------------------End of prediction confidence weighted lovasz_softmax loss ----------------------------#
#--------------------------------------------------------------------------------------------------------------------#
def xloss(logits, labels, ignore=None):
    """Plain cross-entropy loss.

    BUGFIX: ``ignore`` used to be accepted but silently discarded (the
    ignore_index was hard-coded to 255). It is now honoured, with 255 kept
    as the default so existing ``ignore=None`` callers see no change.
    """
    ignore_index = 255 if ignore is None else ignore
    return F.cross_entropy(logits, Variable(labels), ignore_index=ignore_index)
def jaccard_loss(probas, labels,ignore=None, smooth = 100, bk_class = None):
    """
    Something wrong with this loss
    Multi-class Lovasz-Softmax loss
      probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
              Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
      labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
      ignore: void class labels
      smooth: additive smoothing constant
      bk_class: optional background class id zeroed out of the one-hot targets
    """
    vprobas, vlabels = flatten_probas(probas, labels, ignore)
    # one-hot encode the (flattened) ground truth labels
    true_1_hot = torch.eye(vprobas.shape[1])[vlabels]
    if bk_class:
        # zero out the one-hot rows belonging to the background class
        one_hot_assignment = torch.ones_like(vlabels)
        one_hot_assignment[vlabels == bk_class] = 0
        one_hot_assignment = one_hot_assignment.float().unsqueeze(1)
        true_1_hot = true_1_hot*one_hot_assignment
    true_1_hot = true_1_hot.to(vprobas.device)
    intersection = torch.sum(vprobas * true_1_hot)
    cardinality = torch.sum(vprobas + true_1_hot)
    # NOTE(review): operator precedence here computes
    # intersection + (smooth / (cardinality - intersection + smooth)); a soft
    # Jaccard would need (intersection + smooth) / (...). This likely explains
    # the "Something wrong with this loss" warning above -- do not rely on it.
    loss = (intersection + smooth / (cardinality - intersection + smooth)).mean()
    return (1-loss)*smooth
def hinge_jaccard_loss(probas, labels,ignore=None, classes = 'present', hinge = 0.1, smooth =100):
    """
    Multi-class Hinge Jaccard loss
      probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
              Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
      labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
      classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
      ignore: void class labels
      hinge: clamping margin applied to the score difference
      smooth: additive smoothing constant for the TP term
    """
    vprobas, vlabels = flatten_probas(probas, labels, ignore)
    C = vprobas.size(1)
    losses = []
    class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
    for c in class_to_sum:
        if c in vlabels:
            c_sample_ind = vlabels == c
            cprobas = vprobas[c_sample_ind,:]
            # indices of all competing classes
            non_c_ind =np.array([a for a in class_to_sum if a != c])
            class_pred = cprobas[:,c]
            max_non_class_pred = torch.max(cprobas[:,non_c_ind],dim = 1)[0]
            # soft true positives: clamped margin between the class score and
            # the best competing score, shifted into [0, 1+hinge]
            TP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max = hinge)+1.) + smooth
            FN = torch.sum(torch.clamp(max_non_class_pred - class_pred, min = -hinge)+hinge)
            if (~c_sample_ind).sum() == 0:
                # every point belongs to class c: no false positives possible
                FP = 0
            else:
                # same margin statistic evaluated on points NOT of class c
                nonc_probas = vprobas[~c_sample_ind,:]
                class_pred = nonc_probas[:,c]
                max_non_class_pred = torch.max(nonc_probas[:,non_c_ind],dim = 1)[0]
                FP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max = hinge)+1.)
            losses.append(1 - TP/(TP+FP+FN))
    if len(losses) == 0: return 0
    return mean(losses)
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
    """True where ``x`` is NaN (NaN is the only value not equal to itself)."""
    return x != x
def mean(l, ignore_nan=False, empty=0):
    """nanmean-capable mean that also works on generators.

    For an empty input, ``empty`` is returned — unless it is the string
    'raise', in which case a ValueError is raised. A single-element input
    returns that element unchanged.
    """
    values = iter(l)
    if ignore_nan:
        values = ifilterfalse(isnan, values)
    try:
        count = 1
        total = next(values)
    except StopIteration:
        if empty == 'raise':
            raise ValueError('Empty mean')
        return empty
    for count, v in enumerate(values, 2):
        total += v
    if count == 1:
        return total
    return total / count
| 15,675 | 36.864734 | 131 | py |
T-Concord3D | T-Concord3D-master/utils/loss_func.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
    """Multi-class focal loss, optionally weighted per point.

    When ``ssl`` is set and ``lcw`` is given, the unreduced focal loss is
    scaled element-wise by ``lcw`` before averaging. (The original code
    computed lcw/100 here but applied the raw lcw; that raw weighting is
    preserved.)
    """
    def __init__(self, weight=None, ignore_index=None,
                 gamma=2., reduction='none', ssl=False):
        nn.Module.__init__(self)
        self.ignore_index = ignore_index
        self.weight = weight
        self.gamma = gamma
        self.reduction = reduction
        self.ssl = ssl

    def forward(self, input_tensor, target_tensor, lcw=None):
        log_probs = F.log_softmax(input_tensor, dim=1)
        probs = torch.exp(log_probs)
        focal_term = (1 - probs) ** self.gamma
        per_point = F.nll_loss(
            focal_term * log_probs,
            target_tensor,
            weight=self.weight,
            reduction=self.reduction,
            ignore_index=self.ignore_index,
        )
        if self.ssl and lcw is not None:
            # per-point confidence weighting (applied unnormalized)
            return (per_point * lcw).mean()
        return per_point.mean()
class WeightedFocalLoss(nn.Module):
    """Class-weighted focal loss.

    ``weight`` must be an indexable tensor of per-class weights; each
    element's cross-entropy loss is scaled by ``weight[target]`` and the
    focal term ``(1 - p_t) ** gamma`` before averaging.

    BUGFIX: ``forward`` referenced ``self.weights``, which is never set
    (``__init__`` stores ``self.weight``), so every call raised
    AttributeError. It now uses ``self.weight``.

    NOTE(review): ``ignore_index``, ``reduction`` and ``ssl`` are stored but
    not used by this implementation.
    """
    def __init__(self, weight=None, ignore_index=None,
                 gamma=2., reduction='none', ssl=False):
        super().__init__()
        self.ignore_index = ignore_index
        self.weight = weight
        self.gamma = gamma
        self.reduction = reduction
        self.ssl = ssl

    def forward(self, inputs, targets):
        inputs = inputs.squeeze()
        targets = targets.squeeze()
        BCE_loss = F.cross_entropy(inputs, targets, reduction='none')
        pt = torch.exp(-BCE_loss)
        F_loss = self.weight[targets] * (1 - pt) ** self.gamma * BCE_loss
        return F_loss.mean()
| 1,766 | 28.949153 | 69 | py |
T-Concord3D | T-Concord3D-master/model/cylinder_3d.py | # -*- coding:utf-8 -*-
import torch
from torch import nn
REGISTERED_MODELS_CLASSES = {}
def register_model(cls, name=None):
    """Class decorator: record ``cls`` in the global model registry.

    The key defaults to the class name; duplicate registrations are rejected.
    Returns the class unchanged so it can be used as a decorator.
    """
    global REGISTERED_MODELS_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_MODELS_CLASSES, f"exist class: {REGISTERED_MODELS_CLASSES}"
    REGISTERED_MODELS_CLASSES[key] = cls
    return cls
def get_model_class(name):
    """Return the model class registered under ``name`` (see register_model)."""
    global REGISTERED_MODELS_CLASSES
    registry = REGISTERED_MODELS_CLASSES
    assert name in registry, f"available class: {REGISTERED_MODELS_CLASSES}"
    return registry[name]
@register_model
class cylinder_asym(nn.Module):
    """Cylindrical asymmetric 3D segmentation network.

    Composes a point-feature generator (``cylin_model``) with a sparse-conv
    segmentation head (``segmentator_spconv``). With ``use_tta`` the batch is
    treated as ``voting_num`` augmented copies of one sample, and their
    per-point predictions are summed.
    """
    def __init__(self,
                 cylin_model,
                 segmentator_spconv,
                 sparse_shape,
                 ):
        super().__init__()
        self.name = "cylinder_asym"
        self.cylinder_3d_generator = cylin_model
        self.cylinder_3d_spconv_seg = segmentator_spconv
        self.sparse_shape = sparse_shape
    def forward(self, train_pt_fea_ten, train_vox_ten, batch_size, val_grid=None, voting_num=4, use_tta=False):
        # encode raw per-point features into voxel coordinates + features
        coords, features_3d = self.cylinder_3d_generator(train_pt_fea_ten, train_vox_ten)
        if use_tta:
            # each sample appears `voting_num` times in the batch
            batch_size *= voting_num
        spatial_features = self.cylinder_3d_spconv_seg(features_3d, coords, batch_size)
        if use_tta:
            # sum the logits of all augmented copies at each queried voxel;
            # val_grid[idx] holds the (x, y, z) voxel index per point
            features_ori = torch.split(spatial_features, 1, dim=0)
            fused_predict = features_ori[0][0, :, val_grid[0][:, 0], val_grid[0][:, 1], val_grid[0][:, 2]]
            for idx in range(1, voting_num, 1):
                fused_predict += features_ori[idx][0, :, val_grid[idx][:, 0], val_grid[idx][:, 1], val_grid[idx][:, 2]]
            return fused_predict
        else:
            return spatial_features
| 1,901 | 31.793103 | 119 | py |
T-Concord3D | T-Concord3D-master/model/segment_3d.py | # -*- coding:utf-8 -*-
import numpy as np
#import spconv
import spconv.pytorch as spconv
import torch
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
    """3x3x3 submanifold sparse convolution, padding 1, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1,
        bias=False, indice_key=indice_key,
    )
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
    """1x3x3 asymmetric submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(1, 3, 3), stride=stride, padding=(0, 1, 1),
        bias=False, indice_key=indice_key,
    )
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
    """1x1x3 asymmetric submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(1, 1, 3), stride=stride, padding=(0, 0, 1),
        bias=False, indice_key=indice_key,
    )
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
    """1x3x1 asymmetric submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(1, 3, 1), stride=stride, padding=(0, 1, 0),
        bias=False, indice_key=indice_key,
    )
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
    """3x1x1 asymmetric submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(3, 1, 1), stride=stride, padding=(1, 0, 0),
        bias=False, indice_key=indice_key,
    )
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
    """3x1x3 asymmetric submanifold sparse convolution, no bias."""
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=(3, 1, 3), stride=stride, padding=(1, 0, 1),
        bias=False, indice_key=indice_key,
    )
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
    """1x1x1 submanifold sparse convolution, no bias.

    NOTE(review): padding=1 with kernel_size=1 is unusual for a 1x1 conv
    (padding 0 would be expected) — confirm against spconv semantics before
    changing; preserved here for behavioral parity.
    """
    return spconv.SubMConv3d(
        in_planes, out_planes,
        kernel_size=1, stride=stride, padding=1,
        bias=False, indice_key=indice_key,
    )
class ResContextBlock(nn.Module):
    """Context block: two parallel two-conv branches with asymmetric kernels
    (1x3x3 -> 1x3x3 and 3x1x3 -> 3x1x3) whose outputs are summed.

    NOTE(review): the ``kernel_size`` and ``stride`` parameters are accepted
    but unused; all kernels come from the conv helper functions.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ResContextBlock, self).__init__()
        # branch 1: two stacked 1x3x3 submanifold convs
        self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.LeakyReLU()
        self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.LeakyReLU()
        # branch 2: two stacked 3x1x3 submanifold convs
        self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key + "bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        self.weight_initialization()
    def weight_initialization(self):
        # start all BatchNorm layers as identity (gamma=1, beta=0)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # branch 1: conv1 -> act -> bn -> conv1_2 -> act -> bn
        shortcut = self.conv1(x)
        shortcut = shortcut.replace_feature(self.act1(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0(shortcut.features))
        shortcut = self.conv1_2(shortcut)
        shortcut = shortcut.replace_feature(self.act1_2(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0_2(shortcut.features))
        # branch 2: conv2 -> act -> bn -> conv3 -> act -> bn
        resA = self.conv2(x)
        resA = resA.replace_feature(self.act2(resA.features))
        resA = resA.replace_feature(self.bn1(resA.features))
        resA = self.conv3(resA)
        resA = resA.replace_feature(self.act3(resA.features))
        resA = resA.replace_feature(self.bn2(resA.features))
        # element-wise sum of the two branches (both are submanifold convs
        # sharing the same indice_key, so features line up row-for-row)
        resA = resA.replace_feature(resA.features + shortcut.features)
        return resA
class ResBlock(nn.Module):
    """Residual down-block: two asymmetric conv branches (3x1x3 and 1x3x3)
    summed, followed by an optional strided sparse conv for downsampling.

    NOTE(review): ``dropout_rate``, ``kernel_size``, ``stride`` and
    ``drop_out`` are accepted but unused in this implementation.
    """
    def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
                 pooling=True, drop_out=True, height_pooling=False, indice_key=None):
        super(ResBlock, self).__init__()
        self.pooling = pooling
        # branch 1: two stacked 3x1x3 submanifold convs
        self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.act1 = nn.LeakyReLU()
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key + "bef")
        self.act1_2 = nn.LeakyReLU()
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        # branch 2: two stacked 1x3x3 submanifold convs
        self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key + "bef")
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key + "bef")
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        if pooling:
            # downsample; height_pooling also strides the height (3rd) axis
            if height_pooling:
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
                                                padding=1, indice_key=indice_key, bias=False)
            else:
                self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2, 2, 1),
                                                padding=1, indice_key=indice_key, bias=False)
        self.weight_initialization()
    def weight_initialization(self):
        # start all BatchNorm layers as identity (gamma=1, beta=0)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        """Returns (pooled, pre-pool) when pooling, else the block output."""
        # branch 1
        shortcut = self.conv1(x)
        shortcut = shortcut.replace_feature(self.act1(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0(shortcut.features))
        shortcut = self.conv1_2(shortcut)
        shortcut = shortcut.replace_feature(self.act1_2(shortcut.features))
        shortcut = shortcut.replace_feature(self.bn0_2(shortcut.features))
        # branch 2
        resA = self.conv2(x)
        resA = resA.replace_feature(self.act2(resA.features))
        resA = resA.replace_feature(self.bn1(resA.features))
        resA = self.conv3(resA)
        resA = resA.replace_feature(self.act3(resA.features))
        resA = resA.replace_feature(self.bn2(resA.features))
        # element-wise sum of the two branches
        resA = resA.replace_feature(resA.features + shortcut.features)
        if self.pooling:
            resB = self.pool(resA)
            # also return the pre-pool tensor for the decoder skip connection
            return resB, resA
        else:
            return resA
class UpBlock(nn.Module):
    """Decoder up-block: 3x3x3 transition conv, sparse inverse-conv upsample
    keyed to the matching encoder stage (``up_key``), additive skip fusion,
    then three 1x3x3 refinement convs.

    NOTE(review): ``kernel_size`` and ``dropout_rate`` are accepted but
    unused (the dropout experiment below is commented out).
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None, dropout_rate=0.25):
        super(UpBlock, self).__init__()
        self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key + "new_up")
        self.trans_act = nn.LeakyReLU()
        self.trans_bn = nn.BatchNorm1d(out_filters)
        self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act1 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv2 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act2 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key)
        self.act3 = nn.LeakyReLU()
        self.bn3 = nn.BatchNorm1d(out_filters)
        # TODO: commnet this drop out after experiment
        # self.dropout3 = nn.Dropout3d(p=dropout_rate)  # added by me
        # inverse conv restores the indices of the encoder stage `up_key`
        self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)
        self.weight_initialization()
    def weight_initialization(self):
        # start all BatchNorm layers as identity (gamma=1, beta=0)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x, skip):
        # transition conv before upsampling
        upA = self.trans_dilao(x)
        upA = upA.replace_feature(self.trans_act(upA.features))
        upA = upA.replace_feature(self.trans_bn(upA.features))
        ## upsample
        upA = self.up_subm(upA)
        # additive fusion with the encoder skip connection
        upA = upA.replace_feature(upA.features + skip.features)
        # three refinement convs, each followed by act + bn
        upE = self.conv1(upA)
        upE = upE.replace_feature(self.act1(upE.features))
        upE = upE.replace_feature(self.bn1(upE.features))
        upE = self.conv2(upE)
        upE = upE.replace_feature(self.act2(upE.features))
        upE = upE.replace_feature(self.bn2(upE.features))
        upE = self.conv3(upE)
        upE = upE.replace_feature(self.act3(upE.features))
        # TODO: comment this drop out after experiment
        # upE = upE.replace_feature(self.bn3(self.dropout3(upE.features)))  # added by me
        upE = upE.replace_feature(self.bn3(upE.features))  # original implementation
        return upE
class ReconBlock(nn.Module):
    """Reconstruction/attention block: three parallel 1-D asymmetric convs
    (3x1x1, 1x3x1, 1x1x3), each followed by BN + Sigmoid, summed into a gate
    that multiplies the input features element-wise.

    BUGFIX: the third branch used to rebuild its tensor from ``shortcut``
    (``shortcut.replace_feature(...)``) instead of ``shortcut3``. All three
    branches are submanifold convs sharing one indice_key, so the result
    appears unchanged in practice (the indices presumably agree — confirm
    against spconv semantics), but the aliasing was a latent bug; the branch
    now consistently uses its own tensor.
    """
    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ReconBlock, self).__init__()
        self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.Sigmoid()
        self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.Sigmoid()
        self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key + "bef")
        self.bn0_3 = nn.BatchNorm1d(out_filters)
        self.act1_3 = nn.Sigmoid()
    def forward(self, x):
        # branch 1: 3x1x1 conv -> BN -> sigmoid
        shortcut = self.conv1(x)
        shortcut = shortcut.replace_feature(self.bn0(shortcut.features))
        shortcut = shortcut.replace_feature(self.act1(shortcut.features))
        # branch 2: 1x3x1 conv -> BN -> sigmoid
        shortcut2 = self.conv1_2(x)
        shortcut2 = shortcut2.replace_feature(self.bn0_2(shortcut2.features))
        shortcut2 = shortcut2.replace_feature(self.act1_2(shortcut2.features))
        # branch 3: 1x1x3 conv -> BN -> sigmoid
        # (BUGFIX: was shortcut.replace_feature(...), aliasing branch 1)
        shortcut3 = self.conv1_3(x)
        shortcut3 = shortcut3.replace_feature(self.bn0_3(shortcut3.features))
        shortcut3 = shortcut3.replace_feature(self.act1_3(shortcut3.features))
        # sum the three sigmoid gates and apply them to the input features
        shortcut = shortcut.replace_feature(shortcut.features + shortcut2.features + shortcut3.features)
        shortcut = shortcut.replace_feature(shortcut.features * x.features)
        return shortcut
class Asymm_3d_spconv(nn.Module):
    """Asymmetric sparse-conv 3D U-Net: a context block, four residual
    down-blocks, four up-blocks with skip connections, a reconstruction
    gate, and a final SubMConv producing per-voxel class logits.

    NOTE(review): ``use_norm`` is unused, and ``self.strict`` is hard-coded
    to False regardless of the ``strict`` parameter.
    """
    def __init__(self,
                 output_shape,
                 use_norm=True,
                 num_input_features=128,
                 nclasses=20, n_height=32, strict=False, init_size=16):
        super(Asymm_3d_spconv, self).__init__()
        self.nclasses = nclasses
        self.nheight = n_height
        self.strict = False
        sparse_shape = np.array(output_shape)
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
        # encoder: channels double at each stage; the first two stages also
        # pool the height axis
        self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
        self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
        self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False,
                                  indice_key="down4")
        self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False,
                                  indice_key="down5")
        # decoder: each up-block upsamples back through its matching down stage
        self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
        self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
        self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
        self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
        self.ReconNet = ReconBlock(2 * init_size, 2 * init_size, indice_key="recon")
        # classifier sees ReconNet output concatenated with the last up-block
        # output (2*init_size + 2*init_size channels)
        self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1,
                                        bias=True)
    def forward(self, voxel_features, coors, batch_size):
        """Return a dense [B, nclasses, *sparse_shape] logit volume."""
        coors = coors.int()
        ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
                                      batch_size)
        ret = self.downCntx(ret)
        # encoder path; each stage returns (pooled, pre-pool skip)
        down1c, down1b = self.resBlock2(ret)
        down2c, down2b = self.resBlock3(down1c)
        down3c, down3b = self.resBlock4(down2c)
        down4c, down4b = self.resBlock5(down3c)
        # decoder path with skip connections
        up4e = self.upBlock0(down4c, down4b)
        up3e = self.upBlock1(up4e, down3b)
        up2e = self.upBlock2(up3e, down2b)
        up1e = self.upBlock3(up2e, down1b)
        up0e = self.ReconNet(up1e)
        # concatenate gated features with the last decoder output
        up0e = up0e.replace_feature(torch.cat((up0e.features, up1e.features), 1))
        logits = self.logits(up0e)
        y = logits.dense()
        return y
| 13,253 | 41.07619 | 121 | py |
T-Concord3D | T-Concord3D-master/model/cylinder_feature.py | # -*- coding:utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import numba as nb
import multiprocessing
import torch_scatter
class cylinder_fea(nn.Module):
    """Per-point MLP encoder that pools point features into cylinder voxels.

    Each point feature is lifted by a small MLP, points falling into the same
    (batch, grid-cell) index are max-pooled, and the pooled feature is
    optionally compressed to ``fea_compre`` channels.
    """

    def __init__(self, grid_size, fea_dim=3,
                 out_pt_fea_dim=64, max_pt_per_encode=64, fea_compre=None):
        super(cylinder_fea, self).__init__()

        # Point-wise MLP: fea_dim -> 64 -> 128 -> 256 -> out_pt_fea_dim.
        self.PPmodel = nn.Sequential(
            nn.BatchNorm1d(fea_dim),

            nn.Linear(fea_dim, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),

            nn.Linear(64, 128),
            nn.BatchNorm1d(128),
            nn.ReLU(),

            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),

            nn.Linear(256, out_pt_fea_dim)
        )

        self.max_pt = max_pt_per_encode
        self.fea_compre = fea_compre
        self.grid_size = grid_size
        kernel_size = 3
        self.local_pool_op = torch.nn.MaxPool2d(kernel_size, stride=1,
                                                padding=(kernel_size - 1) // 2,
                                                dilation=1)
        self.pool_dim = out_pt_fea_dim

        # Optional point-feature compression head.
        if self.fea_compre is not None:
            self.fea_compression = nn.Sequential(
                nn.Linear(self.pool_dim, self.fea_compre),
                nn.ReLU())
            self.pt_fea_dim = self.fea_compre
        else:
            self.pt_fea_dim = self.pool_dim

    def forward(self, pt_fea, xy_ind):
        """Pool per-point features into unique grid cells.

        Args:
            pt_fea: list (one entry per batch item) of (N_i, fea_dim) tensors.
            xy_ind: list of (N_i, 3) integer grid indices matching ``pt_fea``.

        Returns:
            (unq, feats): unique (batch, *grid) cell indices (int64) and the
            max-pooled (and optionally compressed) feature per cell.
        """
        cur_dev = pt_fea[0].get_device()

        # Prefix each point's grid index with its batch id, then concatenate.
        cat_pt_ind = [F.pad(xy_ind[i_batch], (1, 0), 'constant', value=i_batch)
                      for i_batch in range(len(xy_ind))]
        cat_pt_fea = torch.cat(pt_fea, dim=0)
        cat_pt_ind = torch.cat(cat_pt_ind, dim=0)
        pt_num = cat_pt_ind.shape[0]

        # Shuffle points so BatchNorm statistics are not order-dependent.
        shuffled_ind = torch.randperm(pt_num, device=cur_dev)
        cat_pt_fea = cat_pt_fea[shuffled_ind, :]
        cat_pt_ind = cat_pt_ind[shuffled_ind, :]

        # Unique occupied cells; unq_inv maps each point to its cell.
        unq, unq_inv, unq_cnt = torch.unique(cat_pt_ind, return_inverse=True, return_counts=True, dim=0)
        unq = unq.type(torch.int64)

        # Lift point features, then max-pool per cell.
        processed_cat_pt_fea = self.PPmodel(cat_pt_fea)
        pooled_data = torch_scatter.scatter_max(processed_cat_pt_fea, unq_inv, dim=0)[0]

        # BUG FIX: match the `is not None` test used in __init__ so a falsy
        # but non-None fea_compre (e.g. 0) behaves consistently in both places.
        if self.fea_compre is not None:
            processed_pooled_data = self.fea_compression(pooled_data)
        else:
            processed_pooled_data = pooled_data

        return unq, processed_pooled_data
| 2,812 | 30.965909 | 115 | py |
T-Concord3D | T-Concord3D-master/dataloader/dataset_semantickitti.py | # -*- coding:utf-8 -*-
"""
SemKITTI dataloader
"""
import os
import numpy as np
import torch
import random
import time
import numba as nb
import yaml
from torch.utils import data
import pickle
REGISTERED_DATASET_CLASSES = {}


def register_dataset(cls, name=None):
    """Register *cls* in the global dataset registry and return it.

    Usable as a class decorator; the registry key defaults to the class name.
    """
    global REGISTERED_DATASET_CLASSES
    key = cls.__name__ if name is None else name
    assert key not in REGISTERED_DATASET_CLASSES, f"exist class: {REGISTERED_DATASET_CLASSES}"
    REGISTERED_DATASET_CLASSES[key] = cls
    return cls


def get_model_class(name):
    """Look up a previously registered dataset class by name."""
    global REGISTERED_DATASET_CLASSES
    assert name in REGISTERED_DATASET_CLASSES, f"available class: {REGISTERED_DATASET_CLASSES}"
    return REGISTERED_DATASET_CLASSES[name]
@register_dataset
class voxel_dataset(data.Dataset):
    """Wraps a point-cloud dataset and voxelizes each sample on a Cartesian grid."""

    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, 50, 1.5], min_volume_space=[-50, -50, -3],
                 cut_mix=False):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.flip_aug = flip_aug
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        # TODO check if the cut and mix augmentation is implemented correctly
        self.cut_mix = cut_mix

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)

    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]

        # initialization
        xyz = None
        labels = None
        sig = None
        lcw = None
        ref_st_ind = None
        ref_end_ind = None

        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 5:
            xyz, labels, sig, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 6:
            xyz, labels, sig, lcw, ref_st_ind, ref_end_ind = data
        else:
            raise Exception('Return invalid data tuple')

        # TODO: check --------------------------------
        # cut mix data augmentation: grab instances from a bank and add them to the scene
        if self.cut_mix:
            # load/grab the object bank (renamed from `dir`, which shadowed the builtin)
            cut_mix_dir = '/mnt/beegfs/gpu/argoverse-tracking-all-training/WOD/processed/Labeled/cut_mix'
            new_xyz_all = np.load(f"{cut_mix_dir}/pcl.npy")
            new_label_all = np.load(f"{cut_mix_dir}/ss_id.npy")
            unique_obj = np.unique(new_label_all[:, 0])
            sel_obj_rand = np.random.choice(len(unique_obj), 5)
            new_label = []
            new_xyz = []
            for obj_id in sel_obj_rand:
                obj_mask = new_label_all[:, 0] == obj_id
                new_label.append(new_label_all[obj_mask])
                # BUG FIX: keep points aligned with labels — the original kept
                # the whole point bank while selecting only a few objects'
                # labels, so the two arrays could never line up.
                new_xyz.append(new_xyz_all[obj_mask])
            new_label = np.concatenate(new_label, axis=0)
            new_xyz = np.concatenate(new_xyz, axis=0)

            # perform random flipping and rotation on the inserted objects
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                new_xyz[:, 0] = -new_xyz[:, 0]
            elif flip_type == 2:
                new_xyz[:, 1] = -new_xyz[:, 1]
            elif flip_type == 3:
                new_xyz[:, :2] = -new_xyz[:, :2]
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            new_xyz[:, :2] = np.dot(new_xyz[:, :2], j)

            # BUG FIX: np.concatenate takes a *sequence* of arrays; the
            # original passed them as separate positional arguments, which
            # raises a TypeError as soon as cut_mix is enabled.
            xyz = np.concatenate([xyz, new_xyz[:, :3]], axis=0)
            labels = np.concatenate([labels, new_label[:, 1].reshape(-1, 1)], axis=0)
            if sig is not None:
                sig = np.concatenate([sig, new_xyz[:, 3]], axis=0)
            if lcw is not None:
                # NOTE(review): assumes lcw is column-shaped like `labels` — confirm
                lcw = np.concatenate([lcw, np.ones_like(new_label[:, 1]).reshape(-1, 1)], axis=0)

        # random data augmentation by rotation
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360)
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)

        # random data augmentation by flip x , y or x+y
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]

        max_bound = np.percentile(xyz, 100, axis=0)
        min_bound = np.percentile(xyz, 0, axis=0)

        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)

        # get grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)
        if (intervals == 0).any(): print("Zero interval!")

        # BUG FIX: np.int was removed from NumPy >= 1.24; np.int64 keeps the
        # original default-integer semantics on 64-bit platforms.
        grid_ind = (np.floor((np.clip(xyz, min_bound, max_bound) - min_bound) / intervals)).astype(np.int64)

        # process voxel position
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)

        # process labels: majority vote per voxel
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)

        # TODO: check if there is lcw label confidence weight
        if len(data) == 6:
            # NOTE(review): this reuses the uint8 majority-vote kernel for the
            # confidence weights; nb_process_lcw exists but is unused — confirm
            # which is intended.
            processed_lcw = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
            lcw_voxel_pair = np.concatenate([grid_ind, lcw], axis=1)
            lcw_voxel_pair = lcw_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
            processed_lcw = nb_process_label(np.copy(processed_lcw), lcw_voxel_pair)

        data_tuple = (voxel_position, processed_label)

        # center data on each voxel for PTnet
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz), axis=1)

        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) >= 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)

        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)

        if len(data) == 6:
            data_tuple += (processed_lcw, ref_st_ind, ref_end_ind)
        elif len(data) == 5:
            data_tuple += (ref_st_ind, ref_end_ind)
        return data_tuple
# transformation between Cartesian coordinates and polar coordinates
def cart2polar(input_xyz):
    """Convert (N, 3+) Cartesian points to cylindrical (rho, phi, z)."""
    x, y = input_xyz[:, 0], input_xyz[:, 1]
    rho = np.sqrt(x ** 2 + y ** 2)
    phi = np.arctan2(y, x)
    return np.stack((rho, phi, input_xyz[:, 2]), axis=1)
def polar2cat(input_xyz_polar):
    """Convert (3, ...) cylindrical coordinates (rho, phi, z) back to Cartesian."""
    rho, phi = input_xyz_polar[0], input_xyz_polar[1]
    return np.stack((rho * np.cos(phi), rho * np.sin(phi), input_xyz_polar[2]), axis=0)
@register_dataset
class cylinder_dataset(data.Dataset):
    """Voxelizes point-cloud samples on a cylindrical grid, with optional
    rotation / flip / scale / translation / cut-mix augmentation and
    test-time-augmentation (TTA) support."""

    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, np.pi, 2], min_volume_space=[0, -np.pi, -4],
                 scale_aug=False,
                 transform_aug=False, trans_std=[0.1, 0.1, 0.1],
                 min_rad=-np.pi / 4, max_rad=np.pi / 4,
                 cut_mix=False, use_tta=False):
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.scale_aug = scale_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.transform = transform_aug
        self.trans_std = trans_std
        self.cut_mix = cut_mix
        self.use_tta = use_tta
        # NOTE: drawn once per dataset instance, not per sample
        self.noise_rotation = np.random.uniform(min_rad, max_rad)

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)

    def rotation_points_single_angle(self, points, angle, axis=0):
        """Rotate (N, 3) points by `angle` radians about the given axis (0=x, 1=y, 2=z)."""
        # points: [N, 3]
        rot_sin = np.sin(angle)
        rot_cos = np.cos(angle)
        if axis == 1:
            rot_mat_T = np.array(
                [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],
                dtype=points.dtype)
        elif axis == 2 or axis == -1:
            rot_mat_T = np.array(
                [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],
                dtype=points.dtype)
        elif axis == 0:
            rot_mat_T = np.array(
                [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],
                dtype=points.dtype)
        else:
            raise ValueError("axis should in range")

        return points @ rot_mat_T

    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        if self.use_tta:
            # TTA: return a tuple of several differently-flipped copies.
            data_total = []
            voting = 4
            for idx in range(voting):
                data_single_ori = self.get_single_sample(data, index, idx)
                data_total.append(data_single_ori)
            data_total = tuple(data_total)
            return data_total
        else:
            data_single = self.get_single_sample(data, index)
            return data_single

    def get_single_sample(self, data, index, vote_idx=0):
        """Augment and voxelize one raw sample; ``vote_idx`` fixes the flip for TTA."""
        split = self.point_cloud_dataset.imageset

        # initialization
        xyz = None
        labels = None
        sig = None
        lcw = None
        ref_st_ind = None
        ref_end_ind = None

        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 5:
            xyz, labels, sig, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 6:
            xyz, labels, sig, lcw, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else:
            raise Exception('Return invalid data tuple')

        # cut mix data augmentation: grab instances from a bank and place them on the road
        if self.cut_mix and ((split == 'train') or (split == 'ssl')):
            # load/grab the object bank (renamed from `dir`, which shadowed the builtin)
            cut_mix_dir = '/mnt/beegfs/gpu/argoverse-tracking-all-training/WOD/processed/Labeled/cut_mix'
            new_xyz_all = np.load(f"{cut_mix_dir}/pcl.npy")
            new_label_all = np.load(f"{cut_mix_dir}/ss_id.npy")
            unique_obj = np.unique(new_label_all[:, 0])
            num_object = 10
            sel_obj_rand = np.random.choice(len(unique_obj), num_object)
            aug_label = []
            aug_xyz = []
            for obj_id in sel_obj_rand:  # renamed from `id` (builtin shadowing)
                obj_mask = new_label_all[:, 0] == obj_id
                new_label = new_label_all[obj_mask]
                new_xyz = new_xyz_all[obj_mask]
                # random placement on a point labeled 18 (road class)
                road_mask = np.squeeze(labels) == 18
                road_pcl = xyz[road_mask]
                mix_pos_rand = np.random.choice(len(road_pcl), 1)
                mix_position = road_pcl[mix_pos_rand, :]
                mix_p_x = mix_position[:, 0] - 0.5
                mix_p_y = mix_position[:, 1] - 0.5
                mix_p_z = mix_position[:, 2]
                # shift the object so its corner sits at the chosen road point
                new_xyz[:, 0] = new_xyz[:, 0] - np.max(new_xyz[:, 0])
                new_xyz[:, 1] = new_xyz[:, 1] - np.max(new_xyz[:, 1])
                new_xyz[:, 2] = new_xyz[:, 2] - np.min(new_xyz[:, 2])
                new_xyz[:, 0] = new_xyz[:, 0] + mix_p_x
                new_xyz[:, 1] = new_xyz[:, 1] + mix_p_y
                new_xyz[:, 2] = new_xyz[:, 2] + mix_p_z
                if self.use_tta:
                    flip_type = vote_idx
                else:
                    flip_type = np.random.choice(4, 1)
                if flip_type == 1:
                    new_xyz[:, 0] = -new_xyz[:, 0]
                elif flip_type == 2:
                    new_xyz[:, 1] = -new_xyz[:, 1]
                rotate_rad = np.deg2rad(np.random.random() * 360)
                c, s = np.cos(rotate_rad), np.sin(rotate_rad)
                j = np.matrix([[c, s], [-s, c]])
                new_xyz[:, :2] = np.dot(new_xyz[:, :2], j)
                aug_label.append(new_label)
                aug_xyz.append(new_xyz)
                # duplicate very large objects shifted along x
                mframe = int(len(new_xyz) / 130000)
                if mframe > 1:
                    for i in range(1, mframe):
                        new_xyz[:, 0] = new_xyz[:, 0] - i / 2
                        aug_label.append(new_label)
                        aug_xyz.append(new_xyz)
            new_label = np.concatenate(aug_label, axis=0)
            new_xyz = np.concatenate(aug_xyz, axis=0)
            # combine gt data and cut_mix augmentation
            xyz = np.concatenate([xyz, new_xyz[:, :3]], axis=0)
            labels = np.concatenate([labels, new_label[:, 1].reshape(-1, 1)], axis=0)
            if sig is not None:
                sig = np.concatenate([sig.reshape(-1, 1), new_xyz[:, 3].reshape(-1, 1)], axis=0)
                sig = np.squeeze(sig)
            if lcw is not None:
                new_lcw = np.ones_like(new_label[:, 1]) * 100
                lcw = np.concatenate([lcw, new_lcw.reshape(-1, 1)], axis=0)

        # random data augmentation by rotation
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 90) - np.pi / 4
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)

        # random data augmentation by flip x , y or x+y
        if self.flip_aug:
            if self.use_tta:
                flip_type = vote_idx
            else:
                flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]

        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]

        # random translation jitter
        if self.transform:
            noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
                                        np.random.normal(0, self.trans_std[1], 1),
                                        np.random.normal(0, self.trans_std[2], 1)]).T
            xyz[:, 0:3] += noise_translate

        # convert coordinates into the cylindrical frame
        xyz_pol = cart2polar(xyz)

        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)

        # get grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)

        if (intervals == 0).any(): print("Zero interval!")
        # BUG FIX: np.int was removed from NumPy >= 1.24; np.int64 keeps the
        # original default-integer semantics on 64-bit platforms.
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(np.int64)

        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)

        # majority-vote label per occupied voxel
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)

        # TODO: check if there is lcw label confidence weight
        if len(data) == 6:
            # NOTE(review): reuses the uint8 majority-vote kernel for the
            # confidence weights; nb_process_lcw exists but is unused — confirm.
            processed_lcw = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
            lcw_voxel_pair = np.concatenate([grid_ind, lcw], axis=1)
            lcw_voxel_pair = lcw_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
            processed_lcw = nb_process_label(np.copy(processed_lcw), lcw_voxel_pair)

        data_tuple = (voxel_position, processed_label)

        # center data on each voxel for PTnet
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)

        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) >= 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)

        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)

        # include reference frame start and end index
        if len(data) == 6:
            data_tuple += (processed_lcw, ref_st_ind, ref_end_ind)
        # include pseudo label confidence weights
        elif len(data) == 5:
            data_tuple += (ref_st_ind, ref_end_ind)
        return data_tuple
@register_dataset
class polar_dataset(data.Dataset):
    """Voxelizes point-cloud samples on a polar grid with optional rotation /
    flip / scale augmentation (no cut-mix or TTA)."""

    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=255, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, np.pi, 2], min_volume_space=[0, -np.pi, -4],
                 scale_aug=False):
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.scale_aug = scale_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)

    def __getitem__(self, index):
        'Generates one sample of data'
        data = self.point_cloud_dataset[index]
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2:
                sig = np.squeeze(sig)
        elif len(data) == 5:
            xyz, labels, sig, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        elif len(data) == 6:
            xyz, labels, sig, lcw, ref_st_ind, ref_end_ind = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else:
            raise Exception('Return invalid data tuple')

        # random data augmentation by rotation
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 45) - np.pi / 8
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)

        # random data augmentation by flip x , y or x+y
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]

        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]

        xyz_pol = cart2polar(xyz)

        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)

        # get grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)

        if (intervals == 0).any(): print("Zero interval!")
        # BUG FIX: np.int was removed from NumPy >= 1.24; np.int64 keeps the
        # original default-integer semantics on 64-bit platforms.
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(np.int64)

        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)

        # majority-vote label per occupied voxel
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)

        # TODO: check if there is lcw label confidence weight
        if len(data) == 6:
            # NOTE(review): reuses the uint8 majority-vote kernel for the
            # confidence weights; nb_process_lcw exists but is unused — confirm.
            processed_lcw = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
            lcw_voxel_pair = np.concatenate([grid_ind, lcw], axis=1)
            lcw_voxel_pair = lcw_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
            processed_lcw = nb_process_label(np.copy(processed_lcw), lcw_voxel_pair)

        data_tuple = (voxel_position, processed_label)

        # center data on each voxel for PTnet
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)

        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) >= 3:
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)

        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)

        # include pseudo label confidence weights
        if len(data) == 6:
            data_tuple += (processed_lcw, ref_st_ind, ref_end_ind)
        # reference frame index
        elif len(data) == 5:
            data_tuple += (ref_st_ind, ref_end_ind)
        return data_tuple
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])', nopython=True, cache=True, parallel=False)
def nb_process_label(processed_label, sorted_label_voxel_pair):
    """Majority-vote label per voxel (numba-compiled).

    ``sorted_label_voxel_pair`` rows are (x, y, z, label), pre-sorted so that
    rows of the same voxel are contiguous; each touched voxel in
    ``processed_label`` is set to the most frequent label among its points.
    """
    label_size = 256
    counter = np.zeros((label_size,), dtype=np.uint16)
    counter[sorted_label_voxel_pair[0, 3]] = 1
    cur_sear_ind = sorted_label_voxel_pair[0, :3]
    for i in range(1, sorted_label_voxel_pair.shape[0]):
        cur_ind = sorted_label_voxel_pair[i, :3]
        # New voxel reached: commit the argmax vote for the previous one.
        if not np.all(np.equal(cur_ind, cur_sear_ind)):
            processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
            counter = np.zeros((label_size,), dtype=np.uint16)
            cur_sear_ind = cur_ind
        counter[sorted_label_voxel_pair[i, 3]] += 1
    # Flush the vote for the final voxel.
    processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
    return processed_label
# NOTE(review): unused in this file and algorithmically identical to
# nb_process_label apart from the float32 counter — it still takes integer
# (i8) pairs and writes argmax votes, so it does not aggregate fractional
# confidence weights; confirm the intended behavior before using it.
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])', nopython=True, cache=True, parallel=False)
def nb_process_lcw(processed_label, sorted_label_voxel_pair):
    """Per-voxel argmax vote over (x, y, z, value) rows (numba-compiled)."""
    label_size = 256
    counter = np.zeros((label_size,), dtype=np.float32)
    counter[sorted_label_voxel_pair[0, 3]] = 1
    cur_sear_ind = sorted_label_voxel_pair[0, :3]
    for i in range(1, sorted_label_voxel_pair.shape[0]):
        cur_ind = sorted_label_voxel_pair[i, :3]
        # New voxel reached: commit the argmax vote for the previous one.
        if not np.all(np.equal(cur_ind, cur_sear_ind)):
            processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
            counter = np.zeros((label_size,), dtype=np.float32)
            cur_sear_ind = cur_ind
        counter[sorted_label_voxel_pair[i, 3]] += 1
    # Flush the vote for the final voxel.
    processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
    return processed_label
def collate_fn_BEV(data):
    """Collate voxelized samples into a batch.

    Returns (voxel_position, voxel_label, grid_indices, point_labels,
    point_features, ref_start, ref_end, lcw); the last three are None unless
    each per-sample tuple carries them (length 7 or 8).
    """
    data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    # BUG FIX: np.int was removed from NumPy >= 1.24; np.int64 matches the
    # original default-integer semantics (and what torch expects for labels).
    label2stack = np.stack([d[1] for d in data]).astype(np.int64)
    grid_ind_stack = [d[2] for d in data]
    point_label = [d[3] for d in data]
    xyz = [d[4] for d in data]

    ref_st_index = None
    ref_end_index = None
    lcw2stack = None
    # multi-frame (no ssl): samples also carry reference start/end indices
    if len(data[0]) == 7:
        ref_st_index = [d[5] for d in data]
        ref_end_index = [d[6] for d in data]
    # ssl + multi-frame: additionally a per-voxel label-confidence weight map
    elif len(data[0]) == 8:
        ref_st_index = [d[6] for d in data]
        ref_end_index = [d[7] for d in data]
        lcw2stack = np.stack([d[5] for d in data]).astype(np.float32)

    return torch.from_numpy(data2stack), torch.from_numpy(
        label2stack), grid_ind_stack, point_label, xyz, ref_st_index, ref_end_index, lcw2stack
def collate_fn_BEV_tta(data):
    """Collate test-time-augmentation batches.

    Each element of *data* is a list of augmented copies of one sample; all
    copies are flattened into one batch.
    """
    data2stack = np.stack([da2[0] for da1 in data for da2 in da1]).astype(np.float32)
    # BUG FIX: np.int was removed from NumPy >= 1.24.
    label2stack = np.stack([da2[1] for da1 in data for da2 in da1]).astype(np.int64)
    grid_ind_stack = [da2[2] for da1 in data for da2 in da1]
    point_label = [da2[3] for da1 in data for da2 in da1]
    xyz = [da2[4] for da1 in data for da2 in da1]

    ref_st_index = None
    ref_end_index = None
    lcw2stack = None
    # NOTE(review): len(data[0]) counts the TTA copies of the first sample,
    # not the per-sample tuple length as in collate_fn_BEV — confirm these
    # branches are reachable with the intended vote counts.
    if len(data[0]) == 7:
        ref_st_index = [da2[5] for da1 in data for da2 in da1]
        ref_end_index = [da2[6] for da1 in data for da2 in da1]
    elif len(data[0]) == 8:
        ref_st_index = [da2[6] for da1 in data for da2 in da1]
        ref_end_index = [da2[7] for da1 in data for da2 in da1]
        lcw2stack = np.stack([da2[5] for da1 in data for da2 in da1]).astype(np.float32)

    return torch.from_numpy(data2stack), torch.from_numpy(
        label2stack), grid_ind_stack, point_label, xyz, ref_st_index, ref_end_index, lcw2stack
def collate_fn_BEV_test(data):
    """Collate test-split samples; also returns each sample's dataset index."""
    data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    # BUG FIX: np.int was removed from NumPy >= 1.24.
    label2stack = np.stack([d[1] for d in data]).astype(np.int64)
    grid_ind_stack = [d[2] for d in data]
    point_label = [d[3] for d in data]
    xyz = [d[4] for d in data]
    index = [d[5] for d in data]
    return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz, index
| 29,622 | 40.372905 | 150 | py |
T-Concord3D | T-Concord3D-master/dataloader/augmentations.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
# from __future__ import (
# division,
# absolute_import,
# with_statement,
# print_function,
# unicode_literals,
# )
import random
import numpy as np
import torch
#from pointnet2.data.data_utils import angle_axis
# Utilis
# Utilis
def angle_axis(angle, axis):
    # type: (float, np.ndarray) -> float
    r"""Build the 3x3 rotation matrix for a rotation of ``angle`` radians
    about ``axis`` (Rodrigues' formula) and return it as a float tensor.
    """
    u = axis / np.linalg.norm(axis)
    cosval, sinval = np.cos(angle), np.sin(angle)
    skew = np.array([[0.0, -u[2], u[1]],
                     [u[2], 0.0, -u[0]],
                     [-u[1], u[0], 0.0]])
    rot = cosval * np.eye(3) + sinval * skew + (1.0 - cosval) * np.outer(u, u)
    return torch.from_numpy(rot).float()


def _random_flip(points, v, col):
    """Negate column ``col`` with probability ``v`` (in place); return points."""
    assert 0 <= v <= 1
    if np.random.random() < v:
        points[:, col] = -points[:, col]
    return points


def RandomFlipX(points, v):
    """Flip the x coordinate with probability ``v``."""
    return _random_flip(points, v, 0)


def RandomFlipY(points, v):
    """Flip the y coordinate with probability ``v``."""
    return _random_flip(points, v, 1)


def RandomFlipZ(points, v):
    """Flip the z coordinate with probability ``v``."""
    return _random_flip(points, v, 2)


def _uniform_scale(pts, v, cols):
    """Scale the selected columns by one random factor in [1-v, 1+v]."""
    assert 0 <= v <= 0.5
    factor = np.random.uniform(low=1 - v, high=1 + v)
    pts[:, cols] *= factor
    return pts


def ScaleX(pts, v):
    """Scale x by a random factor in [1-v, 1+v]."""
    return _uniform_scale(pts, v, 0)


def ScaleY(pts, v):
    """Scale y by a random factor in [1-v, 1+v]."""
    return _uniform_scale(pts, v, 1)


def ScaleZ(pts, v):
    """Scale z by a random factor in [1-v, 1+v]."""
    return _uniform_scale(pts, v, 2)


def Resize(pts, v):
    """Scale x, y, z jointly by one random factor in [1-v, 1+v]."""
    return _uniform_scale(pts, v, slice(0, 3))


def NonUniformScale(pts, v):
    """Scale x, y, z by three independent random factors in [1-v, 1+v]."""
    assert 0 <= v <= 0.5
    factors = np.random.uniform(low=1 - v, high=1 + v, size=3)
    pts[:, 0:3] *= torch.from_numpy(factors).float()
    return pts


def _apply_rotation(points, rotation_matrix):
    """Rotate xyz (and, when present, normals in columns 3+) by the matrix."""
    if points.size(1) > 3:
        points[:, 0:3] = points[:, 0:3] @ rotation_matrix.t()
        points[:, 3:] = points[:, 3:] @ rotation_matrix.t()
        return points
    return points @ rotation_matrix.t()


def RotateX(points, v):
    """Rotate about the x axis by a random angle of magnitude at most ``v``."""
    assert 0 <= v <= 2 * np.pi
    if np.random.random() > 0.5:
        v *= -1
    angle = np.random.uniform() * v
    return _apply_rotation(points, angle_axis(angle, np.array([1., 0., 0.])))


def RotateY(points, v):
    """Rotate about the y axis by a random angle of magnitude at most ``v``."""
    assert 0 <= v <= 2 * np.pi
    if np.random.random() > 0.5:
        v *= -1
    angle = np.random.uniform() * v
    return _apply_rotation(points, angle_axis(angle, np.array([0., 1., 0.])))


def RotateZ(points, v):
    """Rotate about the z axis by a random angle of magnitude at most ``v``."""
    assert 0 <= v <= 2 * np.pi
    if np.random.random() > 0.5:
        v *= -1
    angle = np.random.uniform() * v
    return _apply_rotation(points, angle_axis(angle, np.array([0., 0., 1.])))


def RandomAxisRotation(points, v):
    """Rotate about a uniformly random axis by a random angle in [0, v)."""
    assert 0 <= v <= 2 * np.pi
    axis = np.random.randn(3)
    axis /= np.sqrt((axis ** 2).sum())
    angle = np.random.uniform() * v
    return _apply_rotation(points, angle_axis(angle, axis))
def RotatePerturbation(points, v):
    """Apply a small random rotation (about the cloud centroid) to a random
    subset of 50*int(v) points; rotates normals of that subset when present.
    """
    assert 0 <= v <= 10
    v = int(v)
    angle_sigma = 0.1 * v
    angle_clip = 0.1 * v
    n_idx = 50 * v
    angles = np.clip(angle_sigma * np.random.randn(3), -angle_clip, angle_clip)
    rotation_matrix = (angle_axis(angles[2], np.array([0.0, 0.0, 1.0]))
                       @ angle_axis(angles[1], np.array([0.0, 1.0, 0.0]))
                       @ angle_axis(angles[0], np.array([1.0, 0.0, 0.0])))
    center = torch.mean(points[:, 0:3], dim=0)
    idx = np.random.choice(points.size(0), n_idx)
    offset = points[idx, 0:3] - center
    points[idx, :3] += (offset @ rotation_matrix.t()) - offset
    if points.size(1) > 3:
        points[idx, 3:] = points[idx, 3:] @ rotation_matrix.t()
    return points


def Jitter(points, v):
    """Add bounded uniform noise to a random subset of 50*int(v) points."""
    assert 0.0 <= v <= 10
    v = int(v)
    sigma = 0.1 * v
    n_idx = 50 * v
    idx = np.random.choice(points.size(0), n_idx)
    noise = sigma * (np.random.random([n_idx, 3]) - 0.5)
    points[idx, 0:3] += torch.from_numpy(noise).float()
    return points


def PointToNoise(points, v):
    """Replace roughly a fraction ``v`` of points with uniform noise centered
    on the cloud mean."""
    assert 0 <= v <= 0.5
    pick_mask = np.random.random(points.size(0)) < v
    noise_idx = [i for i in range(len(pick_mask)) if pick_mask[i] == True]
    rand_pts = 2 * (np.random.random([len(noise_idx), 3]) - 0.5) + np.mean(points[:, 0:3].numpy(), axis=0)
    points[noise_idx, 0:3] = torch.from_numpy(rand_pts).float()
    return points
def UniformTranslate(points, v):
    """Shift all coordinates by one scalar offset drawn from [-v, v], in place."""
    assert 0 <= v <= 1
    offset = (2 * np.random.random() - 1) * v
    points[:, 0:3] += offset
    return points
def NonUniformTranslate(points, v):
    """Shift x, y, z by independent offsets drawn from [-v, v], in place."""
    assert 0 <= v <= 1
    offsets = (2 * np.random.random(3) - 1) * v
    points[:, 0:3] += torch.from_numpy(offsets).float()
    return points
def RandomDropout(points,v):
assert 0.3 <= v <= 0.875
dropout_rate = v
drop = torch.rand(points.size(0)) < dropout_rate
save_idx = np.random.randint(points.size(0))
points[drop] = points[save_idx]
return points
def RandomErase(points,v):
assert 0 <= v <= 0.5
"v : the radius of erase ball"
random_idx = np.random.randint(points.size(0))
mask = torch.sum((points[:,0:3] - points[random_idx,0:3]).pow(2), dim = 1) < v ** 2
points[mask] = points[random_idx]
return points
#
# def DBSCAN(points,v):
# "https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html"
# assert 0 <= v <= 10
# from sklearn.cluster import DBSCAN
# eps = 0.03 * v
# min_samples = v
# clustering = DBSCAN(eps = eps , min_samples = min_samples).fit(points[:,0:3].numpy())
# for label in set(clustering.labels_):
# mask = (clustering.labels_ == label)
# points[mask,0:3] = torch.mean(points[mask,0:3] , dim = 0)
#
# return points
def ShearXY(points,v):
assert 0 <= v <= 0.5
a , b = v * (2 * np.random.random(2) - 1)
shear_matrix = np.array([[1, 0, 0],
[0, 1, 0],
[a, b, 1]])
shear_matrix = torch.from_numpy(shear_matrix).float()
points[:,0:3] = points[:, 0:3] @ shear_matrix.t()
return points
def ShearYZ(points,v):
assert 0 <= v <= 0.5
b , c = v * (2 * np.random.random(2) - 1)
shear_matrix = np.array([[1, b, c],
[0, 1, 0],
[0, 0, 1]])
shear_matrix = torch.from_numpy(shear_matrix).float()
points[:,0:3] = points[:, 0:3] @ shear_matrix.t()
return points
def ShearXZ(points,v):
assert 0 <= v <= 0.5
a , c = v * (2 * np.random.random(2) - 1)
shear_matrix = np.array([[1, 0, 0],
[a, 1, c],
[0, 0, 1]])
shear_matrix = torch.from_numpy(shear_matrix).float()
points[:,0:3] = points[:, 0:3] @ shear_matrix.t()
return points
def GlobalAffine(points,v):
assert 0 <= v <= 1
affine_matrix = torch.from_numpy(np.eye(3) + np.random.randn(3,3) * v).float()
points[:,0:3] = points[:, 0:3] @ affine_matrix.t()
return points
def Identity(points, v):
    """No-op augmentation: return the input cloud unchanged (v is ignored)."""
    return points
def augment_list(): # operations and their ranges
    """Return the catalogue of augmentations as (op, minval, maxval) triples.

    ``minval``/``maxval`` define the valid magnitude range each op asserts;
    callers linearly map a global magnitude into this range.
    """
    l = (
        (Identity , 0 ,10),
        (RandomFlipX, 0, 1),
        (RandomFlipY, 0, 1),
        (RandomFlipZ, 0, 1),
        (ScaleX, 0, 0.5),
        (ScaleY, 0, 0.5),
        (ScaleZ, 0, 0.5),
        (NonUniformScale, 0 , 0.5),
        (Resize , 0 , 0.5),
        (RotateX, 0, 2 * np.pi),
        (RotateY, 0, 2 * np.pi),
        (RotateZ, 0, 2 * np.pi),
        (RandomAxisRotation, 0, 2 * np.pi),
        (RotatePerturbation, 0, 10),
        (Jitter, 0 , 10),
        (UniformTranslate, 0 , 0.5),
        (NonUniformTranslate, 0 , 0.5),
        (RandomDropout, 0.3 , 0.875),
        (RandomErase, 0, 0.5),
        (PointToNoise, 0 , 0.5),
        (ShearXY, 0 , 0.5 ),
        (ShearYZ, 0 , 0.5 ),
        (ShearXZ, 0 , 0.5 ),
        (GlobalAffine , 0 , 0.15),
    )
    return l
# class RandAugment3D:
# def __init__(self, n, m):
# self.n = n
# self.m = m # [0, 30]
# self.augment_list = augment_list()
#
# def __call__(self, img):
# ops = random.sample(self.augment_list, k=self.n)
# for op in ops:
# # val = (float(self.m) / 30) * float(maxval - minval) + minval
# val = self.m
# img = op(img, val)
#
# return img
class RandAugment3D:
    """Randomly apply ``n`` point-cloud augmentations at magnitude ``m``.

    Each sampled op receives a value linearly scaled from its own
    [minval, maxval] range according to ``m`` in [0, 10]. Accepts a single
    cloud of shape (N, C) or a batch of shape (B, N, C).
    """

    def __init__(self, n=2, m=10):
        """
        n : the number of augmentations sampled per call
        m : magnitude of augmentation, in [0, 10]
        """
        self.n = n
        self.m = m  # [0, 10]
        self.augment_list = augment_list()
        self.epoch = 0

    def __call__(self, pc):
        assert 0 <= self.m <= 10
        if pc.dim() == 3:
            bsize = pc.size()[0]
            for i in range(bsize):
                points = pc[i, :, :]
                ops = random.choices(self.augment_list, k=self.n)
                for op, minval, maxval in ops:
                    # Fix: scale the magnitude into the op's valid range.
                    # Previously the raw m was passed here, which can violate
                    # the ops' range asserts (e.g. rotations require v <= 2*pi).
                    val = (float(self.m) / 10) * float(maxval - minval) + minval
                    points = op(points, val)
                # Fix: write back. Ops that return a NEW tensor (instead of
                # mutating the view in place) were previously discarded.
                pc[i, :, :] = points
        elif pc.dim() == 2:
            points = pc
            ops = random.choices(self.augment_list, k=self.n)
            for op, minval, maxval in ops:
                val = (float(self.m) / 10) * float(maxval - minval) + minval
                points = op(points, val)
            # Fix: return the transformed cloud, not the possibly-stale input.
            return points
        return pc

    def UpdateNM(self, increase=True):
        """Randomly bump (increase=True) or lower (increase=False) either n or m by one step."""
        N_tmp, M_tmp = self.n, self.m
        if increase:
            if np.random.random() > 0.5:
                self.n += 1
            elif self.m < 10:
                self.m += 1
            print("\n Increase N,M from ({},{}) to ({} ,{}) \n".format(N_tmp, M_tmp, self.n, self.m))
        elif increase == False:
            if np.random.random() > 0.5 and self.n > 1:
                self.n -= 1
            elif self.m > 1:
                self.m -= 1
            print("\n Decrease N,M from ({},{}) to ({} ,{}) \n".format(N_tmp, M_tmp, self.n, self.m))
| 11,680 | 27.079327 | 106 | py |
T-Concord3D | T-Concord3D-master/dataloader/pc_dataset.py | # -*- coding:utf-8 -*-
# author: Xinge
# @file: pc_dataset.py
import glob
import os
import pickle
from os.path import exists
import numpy as np
import yaml
from torch.utils import data
# Registry filled by the @register_dataset decorator (name -> dataset class).
REGISTERED_PC_DATASET_CLASSES = {}
# past and future frames global place holders (overwritten by update_config())
past = 0       # number of past scans fused into the reference scan
future = 0     # number of future scans fused into the reference scan
T_past = 0     # folder-name suffix for predictions_/probability_ pseudo-label files
T_future = 0   # folder-name suffix for predictions_/probability_ pseudo-label files
ssl = False    # semi-supervised mode: prefer stored predictions over GT labels
rgb = False    # replace lidar intensity with gray-scale camera color (WOD only)
def register_dataset(cls, name=None):
    """Class decorator: add ``cls`` to the global dataset registry under ``name``.

    Defaults to the class' own name; duplicate registrations are rejected.
    """
    global REGISTERED_PC_DATASET_CLASSES
    name = cls.__name__ if name is None else name
    assert name not in REGISTERED_PC_DATASET_CLASSES, f"exist class: {REGISTERED_PC_DATASET_CLASSES}"
    REGISTERED_PC_DATASET_CLASSES[name] = cls
    return cls
def get_pc_model_class(name):
    """Look up a registered dataset class by its registry name."""
    global REGISTERED_PC_DATASET_CLASSES
    assert name in REGISTERED_PC_DATASET_CLASSES, f"available class: {REGISTERED_PC_DATASET_CLASSES}"
    return REGISTERED_PC_DATASET_CLASSES[name]
@register_dataset
class SemKITTI_demo(data.Dataset):
    """Demo dataset: loads raw velodyne scans from a flat folder.

    In 'val' mode it additionally loads label files from ``demo_label_path``;
    in 'demo' mode labels are all-zero placeholders.
    """
    def __init__(self, data_path, imageset='demo',
                 return_ref=True, label_mapping="semantic-wod.yaml", demo_label_path=None):
        with open(label_mapping, 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        self.return_ref = return_ref
        self.im_idx = []
        self.im_idx += absoluteFilePaths(data_path)
        self.label_idx = []
        if self.imageset == 'val':
            print(demo_label_path)
            self.label_idx += absoluteFilePaths(demo_label_path)

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def __getitem__(self, index):
        """Return (xyz, labels) plus reflectance when return_ref is set."""
        raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
        if self.imageset == 'demo':
            # No labels available in demo mode: emit zeros.
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
        elif self.imageset == 'val':
            annotated_data = np.fromfile(self.label_idx[index], dtype=np.uint32).reshape((-1, 1))
            annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
            annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
        data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
        if self.return_ref:
            data_tuple += (raw_data[:, 3],)
        return data_tuple
@register_dataset
class SemKITTI_sk(data.Dataset):
    """Single-scan SemanticKITTI-style dataset.

    Reads one velodyne scan per sample together with its labels. When the
    module-level ``ssl`` flag is set, labels and per-point confidence weights
    produced earlier (stored under ``predictions_{T_past}_{T_future}`` /
    ``probability_{T_past}_{T_future}``) are preferred over ground-truth
    labels when those files exist.
    """

    def __init__(self, data_path, imageset='train',
                 return_ref=False, label_mapping="semantic-wod.yaml", nusc=None, ssl_data_path=None):
        """Collect scan paths for the requested split.

        :param data_path: root folder containing the zero-padded sequence dirs
        :param imageset: 'train' | 'val' | 'test'
        :param return_ref: also return the reflectance channel
        :param label_mapping: yaml file providing learning_map and the splits
        :param nusc, ssl_data_path: unused; kept for interface compatibility
        :raises Exception: on an unknown ``imageset``
        """
        self.return_ref = return_ref
        with open(label_mapping, 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        if imageset == 'train':
            split = semkittiyaml['split']['train']
        elif imageset == 'val':
            split = semkittiyaml['split']['valid']
        elif imageset == 'test':
            split = semkittiyaml['split']['test']
        else:
            raise Exception('Split must be train/val/test')

        # Fix: the original declared 'T_fture' (typo), so T_future was never
        # actually covered by the global statement (reads still worked, but
        # the declaration was dead).
        global past, future, ssl, T_past, T_future
        self.past = past
        self.future = future
        self.T_past = T_past
        self.T_future = T_future

        self.im_idx = []
        for i_folder in split:
            self.im_idx += absoluteFilePaths('/'.join([data_path, str(i_folder).zfill(2), 'velodyne']))

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def __getitem__(self, index):
        """Load one scan.

        Returns (xyz, labels) extended, depending on flags, with
        (reflectance[, lcw], future_frame_len, origin_len).
        """
        raw_data = np.fromfile(self.im_idx[index], dtype=np.float32).reshape((-1, 4))
        origin_len = len(raw_data)
        if self.imageset == 'test':
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
        else:
            # Prefer previously generated pseudo-labels when running SSL.
            if ssl and exists(self.im_idx[index].replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[
                              :-3] + 'label'):
                annotated_data = np.fromfile(
                    self.im_idx[index].replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[
                    :-3] + 'label',
                    dtype=np.int32).reshape((-1, 1))
            else:
                annotated_data = np.fromfile(self.im_idx[index].replace('velodyne', 'labels')[:-3] + 'label',
                                             dtype=np.int32).reshape((-1, 1))
                annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
            # Per-point confidence weights, stored as integer percentages.
            if ssl and exists(self.im_idx[index].replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                              :-3] + 'label'):
                lcw = np.fromfile(self.im_idx[index].replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                                  :-3] + 'label',
                                  dtype=np.float32).reshape((-1, 1))
                lcw = (lcw * 100).astype(np.int32)
            elif ssl:  # GT labels get full confidence (weight 1.0 per point)
                lcw = np.expand_dims(np.ones_like(raw_data[:, 0], dtype=np.float32), axis=1)
                lcw = (lcw * 100).astype(np.int32)

        # Single-scan dataset: no fused future points.
        future_frame_len = 0
        annotated_data = np.vectorize(self.learning_map.__getitem__)(annotated_data)
        data_tuple = (raw_data[:, :3], annotated_data.astype(np.uint8))
        if self.return_ref and ssl:
            # origin_len indicates the length of the target scan and lcw.
            data_tuple += (raw_data[:, 3], lcw, future_frame_len, origin_len)
        elif self.return_ref:
            # origin_len indicates the length of the target scan.
            data_tuple += (raw_data[:, 3], future_frame_len, origin_len)
        return data_tuple
@register_dataset
class SemKITTI_nusc(data.Dataset):
    """nuScenes lidar-segmentation dataset wrapper.

    Sample metadata comes from a pickled info file; labels are read through
    the nuScenes devkit handle supplied as ``nusc``.
    """

    def __init__(self, data_path, imageset='train',
                 return_ref=False, label_mapping="nuscenes.yaml", nusc=None):
        self.return_ref = return_ref
        with open(imageset, 'rb') as f:
            infos_pkl = pickle.load(f)
        with open(label_mapping, 'r') as stream:
            nuscenesyaml = yaml.safe_load(stream)
        self.learning_map = nuscenesyaml['learning_map']
        self.nusc_infos = infos_pkl['infos']
        self.data_path = data_path
        self.nusc = nusc

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.nusc_infos)

    def __getitem__(self, index):
        """Return (xyz, labels) plus intensity when return_ref is set."""
        info = self.nusc_infos[index]
        lidar_path = info['lidar_path'][16:]
        lidar_sd_token = self.nusc.get('sample', info['token'])['data']['LIDAR_TOP']
        seg_filename = os.path.join(self.nusc.dataroot,
                                    self.nusc.get('lidarseg', lidar_sd_token)['filename'])
        points_label = np.fromfile(seg_filename, dtype=np.uint8).reshape([-1, 1])
        points_label = np.vectorize(self.learning_map.__getitem__)(points_label)
        points = np.fromfile(os.path.join(self.data_path, lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])

        sample = (points[:, :3], points_label.astype(np.uint8))
        if self.return_ref:
            sample += (points[:, 3],)
        return sample
def absoluteFilePaths(directory):
    """Yield absolute paths of every file under ``directory``, sorted within each folder."""
    for dirpath, _, filenames in os.walk(directory):
        for fname in sorted(filenames):
            yield os.path.abspath(os.path.join(dirpath, fname))
def SemKITTI2train(label):
    """Convert label array(s) to train ids; accepts a single array or a list of arrays."""
    if not isinstance(label, list):
        return SemKITTI2train_single(label)
    return [SemKITTI2train_single(item) for item in label]
def SemKITTI2train_single(label):
    """Shift labels down by one so class 0 (ignore) becomes 255; mutates in place."""
    ignore_mask = label == 0
    label -= 1
    label[ignore_mask] = 255
    return label
from os.path import join
def transform_pcl_scan(points, pose0, pose):
    """Re-express a scan in the coordinate frame of the reference pose ``pose0``.

    ``points`` is (N, >=3): xyz followed by extra channels that are passed
    through untouched. ``pose``/``pose0`` are 4x4 scan-to-world matrices.
    """
    homogeneous = np.hstack((points[:, :3], np.ones_like(points[:, :1])))
    # Equivalent to homogeneous @ pose.T, written as a broadcasted sum.
    world_xyz = np.sum(np.expand_dims(homogeneous, 2) * pose.T, axis=1)[:, :3]
    local_xyz = world_xyz - pose0[:3, 3]
    # Equivalent to local_xyz @ pose0[:3, :3].
    local_xyz = np.sum(np.expand_dims(local_xyz, 2) * pose0[:3, :3], axis=1)
    return np.hstack((local_xyz, points[:, 3:]))
def fuse_multiscan(ref_raw_data, ref_annotated_data, ref_lcw, transformed_data,
                   transformed_annotated_data, transformed_lcw, source, ssl):
    """Concatenate a transformed neighbouring scan with the reference scan.

    :param source: -1 for a past frame (prepended), +1 for a future frame
        (appended); the ordering keeps past points before / future points
        after the reference points.
    :param ssl: when True, the per-point confidence arrays are fused too.
    :returns: (raw_data, annotated_data, lcw) — lcw is None when ssl is False.
    :raises ValueError: on any other ``source`` value. (The original printed
        an error and returned 0, which made callers fail later with an opaque
        TypeError at tuple unpacking.)
    """
    if source not in (-1, 1):
        raise ValueError(f"Error data source {source} not Implemented")
    lcw = None
    if source == -1:  # past frame: prepend
        raw_data = np.concatenate((transformed_data, ref_raw_data), 0)
        annotated_data = np.concatenate((transformed_annotated_data, ref_annotated_data), 0)
        if ssl:
            lcw = np.concatenate((transformed_lcw, ref_lcw), 0)
    else:  # future frame: append
        raw_data = np.concatenate((ref_raw_data, transformed_data), 0)
        annotated_data = np.concatenate((ref_annotated_data, transformed_annotated_data), 0)
        if ssl:
            lcw = np.concatenate((ref_lcw, transformed_lcw), 0)
    return raw_data, annotated_data, lcw
def get_combined_data(raw_data, annotated_data, lcw, learning_map, return_ref, origin_len, preceding_frame_len, ssl):
    """Build the per-sample tuple returned by the multiscan datasets.

    Labels are remapped through ``learning_map`` and cast to uint8. Depending
    on the flags the tuple is (xyz, labels) optionally extended with
    (reflectance[, lcw], preceding_frame_len, origin_len), where origin_len
    marks how many points belong to the reference scan.
    """
    mapped = np.vectorize(learning_map.__getitem__)(annotated_data)
    sample = (raw_data[:, :3], mapped.astype(np.uint8))
    if return_ref:
        if ssl:
            # origin_len is used to indicate the length of target-scan and lcw
            sample += (raw_data[:, 3], lcw, preceding_frame_len, origin_len)
        else:
            # origin_len is used to indicate the length of target-scan
            sample += (raw_data[:, 3], preceding_frame_len, origin_len)
    return sample
@register_dataset
class SemKITTI_sk_multiscan(data.Dataset):
    """Multi-scan SemanticKITTI dataset.

    Fuses ``past`` preceding and ``future`` succeeding scans into the
    reference scan's coordinate frame using the per-sequence calibrated
    poses, and supports semi-supervised training (``ssl``) by preferring
    stored pseudo-labels/confidences over ground truth when present.
    """
    def __init__(self, data_path, imageset='train', return_ref=False, label_mapping="semantic-kitti-multiscan.yaml",
                 train_hypers=None, wod=None, ssl_data_path=None):
        # NOTE(review): 'T_fture' below is a typo (the module global is
        # 'T_future'); harmless here because this class reads all of its
        # settings from train_hypers, not from the globals.
        global past, future, ssl, T_past, T_fture
        with open(label_mapping, 'r') as stream:
            semkittiyaml = yaml.safe_load(stream)
        self.return_ref = return_ref
        self.learning_map = semkittiyaml['learning_map']
        self.imageset = imageset
        self.data_path = data_path
        self.past = train_hypers['past']        # scans fused before the reference
        self.future = train_hypers['future']    # scans fused after the reference
        self.T_past = train_hypers['T_past']    # pseudo-label folder suffix
        self.T_future = train_hypers['T_future']
        self.ssl = train_hypers['ssl']
        self.im_idx = []
        self.calibrations = []
        # self.times = []
        self.poses = []
        if imageset == 'train':
            self.split = semkittiyaml['split']['train']
            if self.ssl and (ssl_data_path is not None):
                self.split += semkittiyaml['split']['pseudo']
        elif imageset == 'val':
            self.split = semkittiyaml['split']['valid']
        elif imageset == 'test':
            self.split = semkittiyaml['split']['test']
        elif imageset == 'pseudo':
            self.split = semkittiyaml['split']['pseudo']
        else:
            raise Exception(f'{imageset}: Split must be train/val/test/pseudo')
        # Poses are only needed when neighbouring scans are fused.
        if self.past or self.future:
            self.load_calib_poses()
        for i_folder in self.split:
            self.im_idx += absoluteFilePaths('/'.join([data_path, str(i_folder).zfill(2), 'velodyne']))

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def load_calib_poses(self):
        """
        load calib poses and times.
        """

        ###########
        # Load data
        ###########

        self.calibrations = []
        # self.times = []
        self.poses = {}  # []

        for seq in self.split:
            seq_folder = join(self.data_path, str(seq).zfill(2))

            # Read Calib
            self.calibrations.append(self.parse_calibration(join(seq_folder, "calib.txt")))

            # Read times
            # self.times.append(np.loadtxt(join(seq_folder, 'times.txt'), dtype=np.float32))

            # Read poses
            poses_f64 = self.parse_poses(join(seq_folder, 'poses.txt'), self.calibrations[-1])
            # self.poses.append([pose.astype(np.float32) for pose in poses_f64])
            self.poses[seq] = [pose.astype(np.float32) for pose in poses_f64]

    def parse_calibration(self, filename):
        """ read calibration file with given filename

            Returns
            -------
            dict
                Calibration matrices as 4x4 numpy arrays.
        """
        calib = {}

        calib_file = open(filename)
        # print(filename)
        for line in calib_file:
            key, content = line.strip().split(":")
            values = [float(v) for v in content.strip().split()]

            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0

            calib[key] = pose

        calib_file.close()

        return calib

    def parse_poses(self, filename, calibration):
        """ read poses file with per-scan poses from given filename

            Returns
            -------
            list
                list of poses as 4x4 numpy arrays.
        """
        file = open(filename)
        # print(filename)
        poses = []

        Tr = calibration["Tr"]
        Tr_inv = np.linalg.inv(Tr)

        for line in file:
            values = [float(v) for v in line.strip().split()]

            pose = np.zeros((4, 4))
            pose[0, 0:4] = values[0:4]
            pose[1, 0:4] = values[4:8]
            pose[2, 0:4] = values[8:12]
            pose[3, 3] = 1.0

            # Express the pose in the velodyne frame: Tr^-1 * pose * Tr.
            poses.append(np.matmul(Tr_inv, np.matmul(pose, Tr)))

        return poses

    def get_semantickitti_data(self, newpath, time_frame_idx):
        """Load one scan (points, labels, length, confidence weights).

        In ssl mode, stored pseudo-labels/confidences are preferred when the
        corresponding files exist; GT labels otherwise get full confidence.
        """
        raw_data = np.fromfile(newpath, dtype=np.float32).reshape((-1, 4))
        lcw = None
        if self.imageset == 'test' or self.imageset == 'pseudo':
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0], dtype=int), axis=1)
        else:
            if self.ssl and exists(newpath.replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[:-3]
                                   + 'label'):
                annotated_data = np.fromfile(
                    newpath.replace('velodyne', f"predictions_{self.T_past}_{self.T_future}")[:-3] + 'label',
                    dtype=np.int64).reshape((-1, 1))
            else:
                annotated_data = np.fromfile(newpath.replace('velodyne', 'labels')[:-3] + 'label',
                                             dtype=np.int32).reshape((-1, 1))
                annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
                # if np.sum(np.unique(annotated_data == 18)) > 0:
                #     print(newpath)
            if self.ssl and exists(newpath.replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                                   :-3] + 'label'):
                lcw = np.fromfile(
                    newpath.replace('velodyne', f"probability_{self.T_past}_{self.T_future}")[
                    :-3] + 'label',
                    dtype=np.float32).reshape((-1, 1))
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)
            elif self.ssl:  # in case of GT label give weight = 1.0 per label
                lcw = np.expand_dims(np.ones_like(raw_data[:, 0], dtype=np.float32), axis=1)
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)

        return raw_data, annotated_data, len(raw_data), lcw

    def __getitem__(self, index):
        """Load the reference scan and fuse its past/future neighbours.

        Neighbours are only fused when enough scans exist on BOTH sides of
        the reference index; each neighbour is transformed into the
        reference frame before concatenation.
        """
        # reference scan
        reference_file = self.im_idx[index]
        raw_data, annotated_data, data_len, lcw = self.get_semantickitti_data(reference_file, 0)

        origin_len = data_len
        number_idx = int(self.im_idx[index][-10:-4])    # scan number within the sequence
        # dir_idx = int(self.im_idx[index][-22:-20])
        dir_idx = self.im_idx[index].split('/')[-3]     # sequence id (poses key)

        # past scan
        past_frame_len = 0
        # TODO: added the future frame availability check
        if self.past and ((number_idx - self.past) >= 0) and ((number_idx + self.past) < len(self.poses[dir_idx])):
            # extract the poss of the reference frame
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.past):
                # TODO: past frames
                frame_ind = fuse_idx + 1
                pose = self.poses[dir_idx][number_idx - frame_ind]

                past_file = self.im_idx[index][:-10] + str(number_idx - frame_ind).zfill(6) + self.im_idx[index][-4:]

                past_raw_data, past_annotated_data, past_data_len, past_lcw = self.get_semantickitti_data(past_file,
                                                                                                          -frame_ind)
                past_raw_data = transform_pcl_scan(past_raw_data, pose0, pose)

                # past frames
                if past_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   past_raw_data, past_annotated_data, past_lcw, -1,
                                                                   self.ssl)
                    # count number of past frame points
                    past_frame_len += past_data_len

        # future scan
        future_frame_len = 0
        # TODO: added the future frame availability check
        if self.future and ((number_idx - self.future) >= 0) and (
                (number_idx + self.future) < len(self.poses[dir_idx])):
            # extract the poss of the reference frame
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.future):
                # TODO: future frame
                frame_ind = fuse_idx + 1
                future_pose = self.poses[dir_idx][number_idx + frame_ind]
                future_file = self.im_idx[index][:-10] + str(number_idx + frame_ind).zfill(6) + self.im_idx[index][-4:]
                future_raw_data, future_annotated_data, future_data_len, future_lcw = self.get_semantickitti_data(
                    future_file, frame_ind)
                future_raw_data = transform_pcl_scan(future_raw_data, pose0, future_pose)

                # TODO: check correctness (future frame)
                if future_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   future_raw_data, future_annotated_data, future_lcw,
                                                                   1, self.ssl)
                    # count number of future frame points
                    future_frame_len += future_data_len

        # extract compiled data_tuple
        data_tuple = get_combined_data(raw_data, annotated_data, lcw, self.learning_map, self.return_ref,
                                       origin_len, past_frame_len, self.ssl)

        # # TODO: added the future frame availability check

        return data_tuple
# WOD -------------------------------------------------------------
# def fuse_multi_scan(points, pose0, pose):
# # pose = poses[0][idx]
#
# hpoints = np.hstack((points[:, :3], np.ones_like(points[:, :1])))
# # new_points = hpoints.dot(pose.T)
# new_points = np.sum(np.expand_dims(hpoints, 2) * pose.T, axis=1)
#
# new_points = new_points[:, :3]
# new_coords = new_points - pose0[:3, 3]
# # new_coords = new_coords.dot(pose0[:3, :3])
# new_coords = np.sum(np.expand_dims(new_coords, 2) * pose0[:3, :3], axis=1)
# new_coords = np.hstack((new_coords, points[:, 3:]))
#
# return new_coords
@register_dataset
class WOD_multiscan(data.Dataset):
    """Multi-scan Waymo Open Dataset loader (npy-based layout).

    Same fusion strategy as SemKITTI_sk_multiscan — ``past``/``future``
    neighbouring scans are transformed into the reference frame via stored
    per-scan poses — plus an optional ``rgb`` mode that replaces lidar
    intensity with a gray-scale camera color per point.
    """
    def __init__(self, data_path, imageset='train', return_ref=False, label_mapping="wod-multiscan_labelled.yaml",
                 train_hypers=None, wod=None, ssl_data_path=None):
        # NOTE(review): 'T_fture' is a typo of the module global 'T_future';
        # harmless because settings are read from train_hypers below.
        global past, future, ssl, T_past, T_fture, rgb
        self.return_ref = return_ref
        with open(label_mapping, 'r') as stream:
            wodyaml = yaml.safe_load(stream)
        self.learning_map = wodyaml['learning_map']
        self.imageset = imageset
        self.data_path = data_path
        self.past = train_hypers['past']        # scans fused before the reference
        self.future = train_hypers['future']    # scans fused after the reference
        self.T_past = train_hypers['T_past']    # pseudo-label folder suffix
        self.T_future = train_hypers['T_future']
        self.rgb = train_hypers['rgb']          # replace intensity with camera gray value
        self.ssl = train_hypers['ssl']
        # self.use_time = train_hypers['time'] # Use time instead of intensity
        # self.UDA = train_hypers['uda']
        self.im_idx = []
        self.calibrations = []
        # self.times = []
        self.poses = {}
        if imageset == 'train':
            self.split = wodyaml['split']['train']
            # self.sensor_zpose = train_hypers["S_sensor_zpose"]
            if self.ssl and (ssl_data_path is not None):
                self.split += wodyaml['split']['pseudo']
        elif imageset == 'val':
            self.split = wodyaml['split']['valid']
            # self.sensor_zpose = train_hypers["S_sensor_zpose"]
        elif imageset == 'test':
            self.split = wodyaml['split']['test']
            # self.sensor_zpose = train_hypers["S_sensor_zpose"]
        elif imageset == 'pseudo':
            self.split = wodyaml['split']['pseudo']
            # self.sensor_zpose = train_hypers["T_sensor_zpose"]
        else:
            raise Exception(f'{imageset}: Split must be train/val/test/pseudo')
        # self.split = sorted(os.listdir(self.data_path))
        # self.training_len = len(self.split)
        # self.ssl_data_path = ssl_data_path  # '/mnt/beegfs/gpu/argoverse-tracking-all-training/WOD/processed/Unlabeled/testing'
        # xx = self.data_path.split("/")[-1]
        # if ssl and self.data_path.split("/")[-1] == "training" and self.ssl_data_path:
        #     self.training_len = len(sorted(os.listdir(self.data_path)))
        #     self.split = sorted(os.listdir(self.data_path)) + sorted(os.listdir(self.ssl_data_path))
        # print(len(self.split))
        # TODO: remove after search experiment
        # self.split = self.split[150:]
        # Poses are only needed when neighbouring scans are fused.
        if self.past or self.future:
            self.load_calib_poses()
        # for c, i_folder in enumerate(self.split):
        #     if ssl and (self.data_path.split("/")[-1] == "training") and (
        #             c >= self.training_len):  # 789 number of training folders
        #         self.im_idx += absoluteFilePaths('/'.join([self.ssl_data_path, str(i_folder), 'lidar']))
        #     else:
        #         self.im_idx += absoluteFilePaths('/'.join([self.data_path, str(i_folder), 'lidar']))
        for c, i_folder in enumerate(self.split):
            self.im_idx += absoluteFilePaths('/'.join([self.data_path, str(i_folder), 'lidar']))

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.im_idx)

    def load_calib_poses(self):
        """
        load calib poses and times.
        """

        ###########
        # Load data
        ###########

        self.calibrations = []
        # self.times = []
        self.poses = {}  # []

        for k, seq in enumerate(self.split):  # range(0, 22):
            # if ssl and (self.data_path.split("/")[-1] == "training") and (
            #         k >= self.training_len):  # 789 number of training folders
            #     seq_folder = join(self.ssl_data_path, str(seq))
            # else:
            seq_folder = join(self.data_path, str(seq))

            # Read poses
            poses_f64 = self.parse_poses(seq_folder, k)
            # self.poses.append([pose.astype(np.float32) for pose in poses_f64])
            self.poses[seq] = [pose.astype(np.float32) for pose in poses_f64]

    def parse_poses(self, seq, k):
        """ read poses file with per-scan poses from given filename

            Returns
            -------
            list
                list of poses as 4x4 numpy arrays.
        """
        filename = sorted(glob.glob(os.path.join(seq, "poses", "*.npy")))

        poses = []

        for file in filename:
            pose = np.load(file)
            poses.append(pose)

        return poses

    def get_wod_data(self, newpath, time_frame_idx):
        """Load one WOD scan (points, labels, length, confidence weights).

        ``rgb`` mode replaces the intensity channel with a gray-scale value
        from the per-point camera colors (-1 where no color was provided).
        """
        raw_data = np.load(newpath)
        # if self.use_time:
        #     raw_data[:, 3] = np.ones_like(raw_data[:, 3]) * time_frame_idx
        # if self.UDA:
        #     raw_data[:, 2] += self.sensor_zpose  # elevate the point cloud two meters up to align with WOD
        # TODO: check if the colors are encoded correctly instead of the lidar intensity
        if self.rgb:
            # load rgb colors for each points
            raw_rgb = np.load(newpath.replace('lidar', 'colors')[
                              :-3] + 'npy')
            # convert rgb into gray scale [0, 255]
            raw_gray = 0.2989 * raw_rgb[:, 0] + 0.5870 * raw_rgb[:, 1] + 0.1140 * raw_rgb[:, 2]
            # mask (0) ignored point colors (originally not provided on wod rear-cameras) -> rgb:[1,1,1] or gray:[
            # 0.99990])
            gray_mask = raw_gray > 1  # < 1 #0.9998999999999999
            # assign 0 to the place we want to mask
            raw_gray[gray_mask] = -1
            # replace intensity with gray scale camera image/frame color
            raw_data[:, 3] = raw_gray
            # raw_data[:,4] = gray_mask * 1
        lcw = None
        origin_len = len(raw_data)
        if self.imageset == 'test' or self.imageset == 'pseudo':
            annotated_data = np.expand_dims(np.zeros_like(raw_data[:, 0]), axis=1).reshape((-1, 1))
        else:
            # x = self.im_idx[index].replace('lidar', f"predictions_{self.T_past}_{self.T_future}")[:-3] + 'label'
            if self.ssl and exists(newpath.replace('lidar', f"predictions_{self.T_past}_{self.T_future}")[
                                   :-3] + 'npy'):
                annotated_data = np.load(
                    newpath.replace('lidar', f"predictions_{self.T_past}_{self.T_future}")[
                    :-3] + 'npy').reshape((-1, 1))
            else:
                # print(self.im_idx[index].replace('lidar', 'labels')[:-3] + 'npy')
                annotated_data = np.load(newpath.replace('lidar', 'labels')[:-3] + 'npy',
                                         allow_pickle=True)
                # Some label files store (instance?, semantic) pairs; keep the
                # second column in that case.
                if len(annotated_data.shape) == 2:
                    if annotated_data.shape[1] == 2:
                        annotated_data = annotated_data[:, 1]
                # Reshape the label/annotation to vector.
                annotated_data = annotated_data.reshape((-1, 1))
                annotated_data = annotated_data & 0xFFFF  # delete high 16 digits binary
            if self.ssl and exists(newpath.replace('lidar', f"probability_{self.T_past}_{self.T_future}")[
                                   :-3] + 'npy'):
                lcw = np.load(newpath.replace('lidar', f"probability_{self.T_past}_{self.T_future}")[
                              :-3] + 'npy').reshape((-1, 1))
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)
            elif self.ssl:  # in case of GT label give weight = 1.0 per label
                lcw = np.expand_dims(np.ones_like(raw_data[:, 0]), axis=1)
                # TODO: check casting
                lcw = (lcw * 100).astype(np.int32)

        return raw_data, annotated_data, len(raw_data), lcw

    def __getitem__(self, index):
        """Load the reference scan and fuse its past/future neighbours.

        Neighbours are only fused when enough scans exist on BOTH sides of
        the reference index; each neighbour is transformed into the
        reference frame before concatenation.
        """
        # reference scan
        reference_file = self.im_idx[index]
        raw_data, annotated_data, data_len, lcw = self.get_wod_data(reference_file, 0)

        origin_len = data_len
        number_idx = int(self.im_idx[index][-10:-4])    # scan number within the sequence
        # dir_idx = int(self.im_idx[index][-22:-20])
        dir_idx = self.im_idx[index].split('/')[-3]     # sequence id (poses key)

        # past scan
        past_frame_len = 0
        # TODO: added the future frame availability check
        if self.past and ((number_idx - self.past) >= 0) and ((number_idx + self.past) < len(self.poses[dir_idx])):
            # extract the poss of the reference frame
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.past):
                # TODO: past frames
                frame_ind = fuse_idx + 1
                pose = self.poses[dir_idx][number_idx - frame_ind]

                past_file = self.im_idx[index][:-10] + str(number_idx - frame_ind).zfill(6) + self.im_idx[index][-4:]

                past_raw_data, past_annotated_data, past_data_len, past_lcw = self.get_wod_data(past_file, -frame_ind)
                # transform the past frame into reference frame coordinate system
                past_raw_data = transform_pcl_scan(past_raw_data, pose0, pose)

                # past frames
                if past_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   past_raw_data, past_annotated_data, past_lcw, -1,
                                                                   self.ssl)
                    # count number of past frame points
                    past_frame_len += past_data_len

        # future scan
        future_frame_len = 0
        # TODO: added the future frame availability check
        if self.future and ((number_idx - self.future) >= 0) and (
                (number_idx + self.future) < len(self.poses[dir_idx])):
            # extract the poss of the reference frame
            pose0 = self.poses[dir_idx][number_idx]
            for fuse_idx in range(self.future):
                # TODO: future frame
                frame_ind = fuse_idx + 1
                future_pose = self.poses[dir_idx][number_idx + frame_ind]
                future_file = self.im_idx[index][:-10] + str(number_idx + frame_ind).zfill(6) + self.im_idx[index][-4:]
                future_raw_data, future_annotated_data, future_data_len, future_lcw = self.get_wod_data(future_file,
                                                                                                        frame_ind)
                # transform the future frame into reference frame coordinate system
                future_raw_data = transform_pcl_scan(future_raw_data, pose0, future_pose)

                # TODO: check correctness (future frame)
                if future_data_len != 0:
                    raw_data, annotated_data, lcw = fuse_multiscan(raw_data, annotated_data, lcw,
                                                                   future_raw_data, future_annotated_data, future_lcw,
                                                                   1, self.ssl)
                    # count number of future frame points
                    future_frame_len += future_data_len

        # extract compiled data_tuple
        data_tuple = get_combined_data(raw_data, annotated_data, lcw, self.learning_map, self.return_ref,
                                       origin_len, past_frame_len, self.ssl)

        return data_tuple
# load label class info
def get_label_name(label_mapping):
    """Map each training id to its human-readable class name from the yaml config."""
    with open(label_mapping, 'r') as stream:
        config_yaml = yaml.safe_load(stream)
    class_label_name = {}
    for raw_id in sorted(config_yaml['learning_map'].keys(), reverse=True):
        class_label_name[config_yaml['learning_map'][raw_id]] = config_yaml['labels'][raw_id]
    return class_label_name
def get_label_inv_name(label_inv_mapping):
    """Load the inverse learning map (training id -> original label id) from the yaml config."""
    with open(label_inv_mapping, 'r') as stream:
        config_yaml = yaml.safe_load(stream)
    return config_yaml['learning_map_inv']
def get_nuScenes_label_name(label_mapping):
    """Map each nuScenes training id to its human-readable class name."""
    with open(label_mapping, 'r') as stream:
        nuScenesyaml = yaml.safe_load(stream)
    nuScenes_label_name = {}
    for raw_id in sorted(nuScenesyaml['learning_map'].keys(), reverse=True):
        train_id = nuScenesyaml['learning_map'][raw_id]
        nuScenes_label_name[train_id] = nuScenesyaml['labels_16'][train_id]
    return nuScenes_label_name
def update_config(configs):
    """Copy train_params from a config dict into this module's global placeholders."""
    global past, future, T_past, T_future, ssl, rgb
    params = configs['train_params']
    past = params['past']
    future = params['future']
    T_past = params['T_past']
    T_future = params['T_future']
    ssl = params['ssl']
    rgb = params['rgb']
T-Concord3D | T-Concord3D-master/dataloader/dataset_nuscenes.py | # -*- coding:utf-8 -*-
# author: Xinge
# @file: dataset_nuscenes.py
import numpy as np
import torch
import numba as nb
from torch.utils import data
from dataloader.dataset_semantickitti import register_dataset
def cart2polar(input_xyz):
    """Convert Cartesian points (N, 3+) to cylindrical coordinates (rho, phi, z)."""
    x = input_xyz[:, 0]
    y = input_xyz[:, 1]
    radius = np.sqrt(x ** 2 + y ** 2)
    azimuth = np.arctan2(y, x)
    return np.stack((radius, azimuth, input_xyz[:, 2]), axis=1)
def polar2cat(input_xyz_polar):
    """Convert cylindrical coordinates (rho, phi, z) stacked on axis 0 back to Cartesian."""
    rho, phi, z = input_xyz_polar[0], input_xyz_polar[1], input_xyz_polar[2]
    return np.stack((rho * np.cos(phi), rho * np.sin(phi), z), axis=0)
@register_dataset
class cylinder_dataset_nuscenes(data.Dataset):
    """Voxelizes nuScenes point-cloud samples on a cylindrical (rho, phi, z)
    grid, with optional rotation / flip / scale / translation augmentation.

    Fix: ``.astype(np.int)`` replaced with ``.astype(int)`` — the ``np.int``
    alias was deprecated in NumPy 1.20 and removed in NumPy 1.24, so the old
    code raises AttributeError on current NumPy. ``int`` is the exact value
    the alias pointed to, so behavior is unchanged.
    """
    def __init__(self, in_dataset, grid_size, rotate_aug=False, flip_aug=False, ignore_label=0, return_test=False,
                 fixed_volume_space=False, max_volume_space=[50, np.pi, 3], min_volume_space=[0, -np.pi, -5],
                 scale_aug=False, transform_aug=False, trans_std=[0.1, 0.1, 0.1],
                 min_rad=-np.pi / 4, max_rad=np.pi / 4):
        'Initialization'
        self.point_cloud_dataset = in_dataset
        self.grid_size = np.asarray(grid_size)
        self.rotate_aug = rotate_aug
        self.flip_aug = flip_aug
        self.scale_aug = scale_aug
        self.ignore_label = ignore_label
        self.return_test = return_test
        self.fixed_volume_space = fixed_volume_space
        self.max_volume_space = max_volume_space
        self.min_volume_space = min_volume_space
        self.transform = transform_aug
        self.trans_std = trans_std
        # NOTE(review): this rotation is sampled once at construction and then
        # never used in __getitem__ — presumably leftover; confirm before removing.
        self.noise_rotation = np.random.uniform(min_rad, max_rad)
    def __len__(self):
        'Denotes the total number of samples'
        return len(self.point_cloud_dataset)
    def __getitem__(self, index):
        data = self.point_cloud_dataset[index]
        if len(data) == 2:
            xyz, labels = data
        elif len(data) == 3:
            xyz, labels, sig = data
            if len(sig.shape) == 2: sig = np.squeeze(sig)
        else:
            raise Exception('Return invalid data tuple')
        # random data augmentation by rotation about the z axis
        if self.rotate_aug:
            rotate_rad = np.deg2rad(np.random.random() * 360) - np.pi
            c, s = np.cos(rotate_rad), np.sin(rotate_rad)
            j = np.matrix([[c, s], [-s, c]])
            xyz[:, :2] = np.dot(xyz[:, :2], j)
        # random data augmentation by flip x, y or x+y
        if self.flip_aug:
            flip_type = np.random.choice(4, 1)
            if flip_type == 1:
                xyz[:, 0] = -xyz[:, 0]
            elif flip_type == 2:
                xyz[:, 1] = -xyz[:, 1]
            elif flip_type == 3:
                xyz[:, :2] = -xyz[:, :2]
        # random isotropic scaling of x/y by +-5%
        if self.scale_aug:
            noise_scale = np.random.uniform(0.95, 1.05)
            xyz[:, 0] = noise_scale * xyz[:, 0]
            xyz[:, 1] = noise_scale * xyz[:, 1]
        # random Gaussian translation, per-axis std from trans_std
        if self.transform:
            noise_translate = np.array([np.random.normal(0, self.trans_std[0], 1),
                                        np.random.normal(0, self.trans_std[1], 1),
                                        np.random.normal(0, self.trans_std[2], 1)]).T
            xyz[:, 0:3] += noise_translate
        # convert coordinates into cylindrical (rho, phi, z)
        xyz_pol = cart2polar(xyz)
        max_bound_r = np.percentile(xyz_pol[:, 0], 100, axis=0)
        min_bound_r = np.percentile(xyz_pol[:, 0], 0, axis=0)
        max_bound = np.max(xyz_pol[:, 1:], axis=0)
        min_bound = np.min(xyz_pol[:, 1:], axis=0)
        max_bound = np.concatenate(([max_bound_r], max_bound))
        min_bound = np.concatenate(([min_bound_r], min_bound))
        if self.fixed_volume_space:
            max_bound = np.asarray(self.max_volume_space)
            min_bound = np.asarray(self.min_volume_space)
        # get each point's integer grid index
        crop_range = max_bound - min_bound
        cur_grid_size = self.grid_size
        intervals = crop_range / (cur_grid_size - 1)
        if (intervals == 0).any(): print("Zero interval!")
        # np.int was removed in NumPy 1.24; the builtin int is its exact value.
        grid_ind = (np.floor((np.clip(xyz_pol, min_bound, max_bound) - min_bound) / intervals)).astype(int)
        # voxel centers expressed back in Cartesian coordinates
        voxel_position = np.zeros(self.grid_size, dtype=np.float32)
        dim_array = np.ones(len(self.grid_size) + 1, int)
        dim_array[0] = -1
        voxel_position = np.indices(self.grid_size) * intervals.reshape(dim_array) + min_bound.reshape(dim_array)
        voxel_position = polar2cat(voxel_position)
        # per-voxel majority-vote labels (ignore_label where no point falls)
        processed_label = np.ones(self.grid_size, dtype=np.uint8) * self.ignore_label
        label_voxel_pair = np.concatenate([grid_ind, labels], axis=1)
        label_voxel_pair = label_voxel_pair[np.lexsort((grid_ind[:, 0], grid_ind[:, 1], grid_ind[:, 2])), :]
        processed_label = nb_process_label(np.copy(processed_label), label_voxel_pair)
        data_tuple = (voxel_position, processed_label)
        # center each point on its voxel for the point-wise network branch
        voxel_centers = (grid_ind.astype(np.float32) + 0.5) * intervals + min_bound
        return_xyz = xyz_pol - voxel_centers
        return_xyz = np.concatenate((return_xyz, xyz_pol, xyz[:, :2]), axis=1)
        if len(data) == 2:
            return_fea = return_xyz
        elif len(data) == 3:
            # append intensity as the last feature channel
            return_fea = np.concatenate((return_xyz, sig[..., np.newaxis]), axis=1)
        if self.return_test:
            data_tuple += (grid_ind, labels, return_fea, index)
        else:
            data_tuple += (grid_ind, labels, return_fea)
        return data_tuple
# Majority-vote label per voxel, compiled with numba for speed.
# `sorted_label_voxel_pair` rows are (ix, iy, iz, label) and MUST already be
# lexsorted by voxel index so that all points of one voxel are contiguous;
# a 256-bin histogram is accumulated per run and its argmax written back.
@nb.jit('u1[:,:,:](u1[:,:,:],i8[:,:])', nopython=True, cache=True, parallel=False)
def nb_process_label(processed_label, sorted_label_voxel_pair):
    # Histogram size: labels are stored as uint8, so at most 256 classes.
    label_size = 256
    counter = np.zeros((label_size,), dtype=np.uint16)
    # Seed the histogram with the first point's label.
    counter[sorted_label_voxel_pair[0, 3]] = 1
    cur_sear_ind = sorted_label_voxel_pair[0, :3]
    for i in range(1, sorted_label_voxel_pair.shape[0]):
        cur_ind = sorted_label_voxel_pair[i, :3]
        if not np.all(np.equal(cur_ind, cur_sear_ind)):
            # Voxel boundary reached: commit the majority label and reset.
            processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
            counter = np.zeros((label_size,), dtype=np.uint16)
            cur_sear_ind = cur_ind
        counter[sorted_label_voxel_pair[i, 3]] += 1
    # Flush the final voxel's histogram.
    processed_label[cur_sear_ind[0], cur_sear_ind[1], cur_sear_ind[2]] = np.argmax(counter)
    return processed_label
def collate_fn_BEV(data):
    """Collate a list of dataset samples into one batch.

    Each sample is (voxel_position, processed_label, grid_ind, point_label,
    point_features); the first two are stacked into tensors, the rest are
    returned as per-sample lists (they have ragged point counts).

    Fix: ``np.int`` was removed in NumPy 1.24; use the builtin ``int``
    (the exact value the alias pointed to), so behavior is unchanged.
    """
    data2stack = np.stack([d[0] for d in data]).astype(np.float32)
    label2stack = np.stack([d[1] for d in data]).astype(int)
    grid_ind_stack = [d[2] for d in data]
    point_label = [d[3] for d in data]
    xyz = [d[4] for d in data]
    return torch.from_numpy(data2stack), torch.from_numpy(label2stack), grid_ind_stack, point_label, xyz
# SemKITTI_label_name = {0: 'noise',
# 1: 'animal',
# 2: 'human.pedestrian.adult',
# 3: 'human.pedestrian.child',
# 4: 'human.pedestrian.construction_worker',
# 5: 'human.pedestrian.personal_mobility',
# 6: 'human.pedestrian.police_officer',
# 7: 'human.pedestrian.stroller',
# 8: 'human.pedestrian.wheelchair',
# 9: 'movable_object.barrier',
# 10: 'movable_object.debris',
# 11: 'movable_object.pushable_pullable',
# 12: 'movable_object.trafficcone',
# 13: 'static_object.bicycle_rack',
# 14: 'vehicle.bicycle',
# 15: 'vehicle.bus.bendy',
# 16: 'vehicle.bus.rigid',
# 17: 'vehicle.car',
# 18: 'vehicle.construction',
# 19: 'vehicle.emergency.ambulance',
# 20: 'vehicle.emergency.police',
# 21: 'vehicle.motorcycle',
# 22: 'vehicle.trailer',
# 23: 'vehicle.truck',
# 24: 'flat.driveable_surface',
# 25: 'flat.other',
# 26: 'flat.sidewalk',
# 27: 'flat.terrain',
# 28: 'static.manmade',
# 29: 'static.other',
# 30: 'static.vegetation',
# 31: 'vehicle.ego'
# }
#
# SemKITTI_label_name_16 = {
# 0: 'noise',
# 1: 'barrier',
# 2: 'bicycle',
# 3: 'bus',
# 4: 'car',
# 5: 'construction_vehicle',
# 6: 'motorcycle',
# 7: 'pedestrian',
# 8: 'traffic_cone',
# 9: 'trailer',
# 10: 'truck',
# 11: 'driveable_surface',
# 12: 'other_flat',
# 13: 'sidewalk',
# 14: 'terrain',
# 15: 'manmade',
# 16: 'vegetation',
# }
#
# labels_mapping = {
# 1: 0,
# 5: 0,
# 7: 0,
# 8: 0,
# 10: 0,
# 11: 0,
# 13: 0,
# 19: 0,
# 20: 0,
# 0: 0,
# 29: 0,
# 31: 0,
# 9: 1,
# 14: 2,
# 15: 3,
# 16: 3,
# 17: 4,
# 18: 5,
# 21: 6,
# 2: 7,
# 3: 7,
# 4: 7,
# 6: 7,
# 12: 8,
# 22: 9,
# 23: 10,
# 24: 11,
# 25: 12,
# 26: 13,
# 27: 14,
# 28: 15,
# 30: 16
# }
| 9,266 | 35.920319 | 114 | py |
T-Concord3D | T-Concord3D-master/dataloader/preprocess.py | # -*- coding:utf-8 -*-
# author: Awet H. Gebrehiwot
# --------------------------|
import torch
import torchvision.transforms as transforms
from augmentations import RandAugment3D
def preprocessing(point_set, cls):
    """Apply a RandAugment3D(2, 2) pipeline to a point cloud.

    Returns the augmented points as a torch tensor together with the
    (unmodified) class target.
    """
    pipeline = transforms.Compose([RandAugment3D(2, 2)])
    return torch.from_numpy(pipeline(point_set)), cls
| 397 | 23.875 | 59 | py |
darts | darts-master/cnn/test.py | import os
import sys
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# Command-line options for evaluating a pretrained DARTS model on CIFAR-10.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
# Timestamped logging to stdout.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
  format=log_format, datefmt='%m/%d %I:%M:%S %p')
# CIFAR-10 has ten target classes.
CIFAR_CLASSES = 10
def main():
  """Evaluate a pretrained DARTS model on the CIFAR-10 test set (GPU only)."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
  # Seed all RNGs and pin the requested GPU for a reproducible run.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)
  # Rebuild the architecture from its genotype and restore trained weights.
  genotype = eval("genotypes.%s" % args.arch)
  net = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype).cuda()
  utils.load(net, args.model_path)
  logging.info("param size = %fMB", utils.count_parameters_in_MB(net))
  criterion = nn.CrossEntropyLoss().cuda()
  # Test-set pipeline (no augmentation beyond normalization).
  _, test_transform = utils._data_transforms_cifar10(args)
  test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
  test_queue = torch.utils.data.DataLoader(
      test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
  net.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(test_queue, net, criterion)
  logging.info('test_acc %f', test_acc)
def infer(test_queue, model, criterion):
  """Run one evaluation pass over `test_queue`; return (top-1 acc, mean loss).

  Modernization fixes (the old code cannot even parse on Python >= 3.7):
  - `cuda(async=True)`: `async` became a reserved keyword in Python 3.7,
    making this a SyntaxError -> use `non_blocking=True`.
  - `Variable(..., volatile=True)` was removed in PyTorch 0.4 -> wrap the
    loop in `torch.no_grad()` instead.
  - `loss.data[0]` no longer indexes a 0-dim tensor -> use `.item()`.
  """
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()
  with torch.no_grad():
    for step, (input, target) in enumerate(test_queue):
      input = input.cuda()
      target = target.cuda(non_blocking=True)
      logits, _ = model(input)
      loss = criterion(logits, target)
      prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
      n = input.size(0)
      objs.update(loss.item(), n)
      top1.update(prec1.item(), n)
      top5.update(prec5.item(), n)
      if step % args.report_freq == 0:
        logging.info('test %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, objs.avg
# Script entry point: run evaluation when invoked directly.
if __name__ == '__main__':
  main()
| 3,593 | 33.228571 | 102 | py |
darts | darts-master/cnn/architect.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
class Architect(object):
  """Second-order (unrolled) optimizer for DARTS architecture parameters.

  Alternates with the network-weight optimizer: `step` updates only the
  architecture parameters (alphas), either directly on the validation loss
  or through one unrolled virtual SGD step of the network weights.

  Fixes: the positional `Tensor.sub/add_(Number alpha, Tensor other)`
  overloads were deprecated and removed from PyTorch; `alpha` is now passed
  as a keyword. The bare `except:` is narrowed to `KeyError` (the error
  raised when an optimizer's `momentum_buffer` does not exist yet).
  """
  def __init__(self, model, args):
    self.network_momentum = args.momentum
    self.network_weight_decay = args.weight_decay
    self.model = model
    # Adam over the architecture parameters only; the network weights are
    # handled by the caller's optimizer.
    self.optimizer = torch.optim.Adam(self.model.arch_parameters(),
        lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
  def _compute_unrolled_model(self, input, target, eta, network_optimizer):
    # One virtual SGD-with-momentum step on the training loss:
    #   w' = w - eta * (momentum * buffer + dL_train/dw + weight_decay * w)
    loss = self.model._loss(input, target)
    theta = _concat(self.model.parameters()).data
    try:
      moment = _concat(network_optimizer.state[v]['momentum_buffer'] for v in self.model.parameters()).mul_(self.network_momentum)
    except KeyError:
      # First step: no momentum buffers exist yet, so the term is zero.
      moment = torch.zeros_like(theta)
    dtheta = _concat(torch.autograd.grad(loss, self.model.parameters())).data + self.network_weight_decay*theta
    unrolled_model = self._construct_model_from_theta(theta.sub(moment + dtheta, alpha=eta))
    return unrolled_model
  def step(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer, unrolled):
    """Take one optimization step on the architecture parameters."""
    self.optimizer.zero_grad()
    if unrolled:
        self._backward_step_unrolled(input_train, target_train, input_valid, target_valid, eta, network_optimizer)
    else:
        self._backward_step(input_valid, target_valid)
    self.optimizer.step()
  def _backward_step(self, input_valid, target_valid):
    # First-order approximation: plain validation-loss gradient w.r.t. alphas.
    loss = self.model._loss(input_valid, target_valid)
    loss.backward()
  def _backward_step_unrolled(self, input_train, target_train, input_valid, target_valid, eta, network_optimizer):
    # Second-order update: dL_val/dalpha - eta * (d^2 L_train / dw dalpha) . dL_val/dw'
    unrolled_model = self._compute_unrolled_model(input_train, target_train, eta, network_optimizer)
    unrolled_loss = unrolled_model._loss(input_valid, target_valid)
    unrolled_loss.backward()
    dalpha = [v.grad for v in unrolled_model.arch_parameters()]
    vector = [v.grad.data for v in unrolled_model.parameters()]
    implicit_grads = self._hessian_vector_product(vector, input_train, target_train)
    for g, ig in zip(dalpha, implicit_grads):
      g.data.sub_(ig.data, alpha=eta)
    # Copy the computed gradients into the live model's alpha .grad slots.
    for v, g in zip(self.model.arch_parameters(), dalpha):
      if v.grad is None:
        v.grad = Variable(g.data)
      else:
        v.grad.data.copy_(g.data)
  def _construct_model_from_theta(self, theta):
    # Clone the model (same alphas) and load the flat parameter vector
    # `theta` back into each named parameter's original shape.
    model_new = self.model.new()
    model_dict = self.model.state_dict()
    params, offset = {}, 0
    for k, v in self.model.named_parameters():
      v_length = np.prod(v.size())
      params[k] = theta[offset: offset+v_length].view(v.size())
      offset += v_length
    assert offset == len(theta)
    model_dict.update(params)
    model_new.load_state_dict(model_dict)
    return model_new.cuda()
  def _hessian_vector_product(self, vector, input, target, r=1e-2):
    # Central finite difference of dL_train/dalpha along `vector` in w-space:
    #   (grad(w + R v) - grad(w - R v)) / (2R), with R = r / ||v||.
    R = r / _concat(vector).norm()
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(v, alpha=R)
    loss = self.model._loss(input, target)
    grads_p = torch.autograd.grad(loss, self.model.arch_parameters())
    # Move 2R down from w+ to reach w-.
    for p, v in zip(self.model.parameters(), vector):
      p.data.sub_(v, alpha=2*R)
    loss = self.model._loss(input, target)
    grads_n = torch.autograd.grad(loss, self.model.arch_parameters())
    # Restore the original weights.
    for p, v in zip(self.model.parameters(), vector):
      p.data.add_(v, alpha=R)
    return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
| 3,429 | 35.88172 | 130 | py |
darts | darts-master/cnn/train_imagenet.py | import os
import sys
import numpy as np
import time
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
# Command-line options for training a DARTS-derived network on ImageNet.
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-5, help='weight decay')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=250, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5., help='gradient clipping')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--gamma', type=float, default=0.97, help='learning rate decay')
parser.add_argument('--decay_period', type=int, default=1, help='epochs between two learning rate decays')
parser.add_argument('--parallel', action='store_true', default=False, help='data parallelism')
args = parser.parse_args()
# Timestamped experiment directory; snapshot all *.py sources into it.
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
# Log both to stdout and to <save>/log.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
  format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# ImageNet (ILSVRC-2012) has 1000 target classes.
CLASSES = 1000
class CrossEntropyLabelSmooth(nn.Module):
  """Cross-entropy with uniform label smoothing.

  The one-hot target distribution is mixed with the uniform distribution:
  q = (1 - epsilon) * one_hot + epsilon / num_classes, and the loss is the
  resulting cross-entropy averaged over the batch.
  """
  def __init__(self, num_classes, epsilon):
    super(CrossEntropyLabelSmooth, self).__init__()
    self.num_classes = num_classes
    self.epsilon = epsilon
    self.logsoftmax = nn.LogSoftmax(dim=1)
  def forward(self, inputs, targets):
    log_probs = self.logsoftmax(inputs)
    # Build the one-hot matrix, then smooth it towards uniform.
    one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
    smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
    return (-smoothed * log_probs).mean(0).sum()
def main():
  """Train a DARTS-derived network on ImageNet with label smoothing,
  auxiliary loss, step LR decay, and best-checkpoint saving."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
  # Seed all RNGs and pin the requested GPU for reproducibility.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)
  # Build the network from the named genotype; optional multi-GPU wrapping.
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
  if args.parallel:
    model = nn.DataParallel(model).cuda()
  else:
    model = model.cuda()
  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
  # Plain cross-entropy for validation; label-smoothed version for training.
  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()
  criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
  criterion_smooth = criterion_smooth.cuda()
  optimizer = torch.optim.SGD(
    model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )
  # Standard ImageNet folder layout: <data>/train and <data>/val.
  traindir = os.path.join(args.data, 'train')
  validdir = os.path.join(args.data, 'val')
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  train_data = dset.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      transforms.ColorJitter(
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.2),
      transforms.ToTensor(),
      normalize,
    ]))
  valid_data = dset.ImageFolder(
    validdir,
    transforms.Compose([
      transforms.Resize(256),
      transforms.CenterCrop(224),
      transforms.ToTensor(),
      normalize,
    ]))
  train_queue = torch.utils.data.DataLoader(
    train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
  valid_queue = torch.utils.data.DataLoader(
    valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
  # Exponential-style decay: multiply LR by gamma every decay_period epochs.
  scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
  best_acc_top1 = 0
  for epoch in range(args.epochs):
    # NOTE(review): scheduler.step() before the epoch matches the pre-1.1
    # PyTorch API ordering this code was written for.
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    # Drop-path probability is annealed linearly over training.
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    train_acc, train_obj = train(train_queue, model, criterion_smooth, optimizer)
    logging.info('train_acc %f', train_acc)
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)
    # Checkpoint every epoch; additionally mark the best top-1 model.
    is_best = False
    if valid_acc_top1 > best_acc_top1:
      best_acc_top1 = valid_acc_top1
      is_best = True
    utils.save_checkpoint({
      'epoch': epoch + 1,
      'state_dict': model.state_dict(),
      'best_acc_top1': best_acc_top1,
      'optimizer' : optimizer.state_dict(),
      }, is_best, args.save)
def train(train_queue, model, criterion, optimizer):
  """Run one training epoch; return (top-1 accuracy, mean loss).

  Modernization fixes:
  - `cuda(async=True)` is a SyntaxError since Python 3.7 (`async` is a
    reserved keyword) -> `non_blocking=True`.
  - The `Variable` wrappers are gone (tensors carry autograd since 0.4).
  - `clip_grad_norm` was deprecated in favour of the in-place
    `clip_grad_norm_`.
  - `tensor.data[0]` no longer works for 0-dim tensors -> `.item()`.
  """
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.train()
  for step, (input, target) in enumerate(train_queue):
    target = target.cuda(non_blocking=True)
    input = input.cuda()
    optimizer.zero_grad()
    logits, logits_aux = model(input)
    loss = criterion(logits, target)
    if args.auxiliary:
      # Add the auxiliary-tower loss, scaled by its weight.
      loss_aux = criterion(logits_aux, target)
      loss += args.auxiliary_weight*loss_aux
    loss.backward()
    nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
    optimizer.step()
    prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)
    objs.update(loss.item(), n)
    top1.update(prec1.item(), n)
    top5.update(prec5.item(), n)
    if step % args.report_freq == 0:
      logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
  """Run one validation pass; return (top-1 acc, top-5 acc, mean loss).

  Modernization fixes: `torch.no_grad()` replaces the removed
  `Variable(..., volatile=True)`, `non_blocking=True` replaces the
  Python-3.7-illegal `cuda(async=True)`, and `.item()` replaces
  `tensor.data[0]`.
  """
  objs = utils.AvgrageMeter()
  top1 = utils.AvgrageMeter()
  top5 = utils.AvgrageMeter()
  model.eval()
  with torch.no_grad():
    for step, (input, target) in enumerate(valid_queue):
      input = input.cuda()
      target = target.cuda(non_blocking=True)
      logits, _ = model(input)
      loss = criterion(logits, target)
      prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
      n = input.size(0)
      objs.update(loss.item(), n)
      top1.update(prec1.item(), n)
      top5.update(prec5.item(), n)
      if step % args.report_freq == 0:
        logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
  return top1.avg, top5.avg, objs.avg
# Script entry point: start ImageNet training when invoked directly.
if __name__ == '__main__':
  main()
| 7,992 | 33.601732 | 106 | py |
darts | darts-master/cnn/utils.py | import os
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
class AvgrageMeter(object):
  """Tracks a running sum and count, exposing the mean as `avg`."""
  def __init__(self):
    self.reset()
  def reset(self):
    """Clear all accumulated statistics."""
    self.avg = 0
    self.sum = 0
    self.cnt = 0
  def update(self, val, n=1):
    """Record `val` observed `n` times and refresh the running mean."""
    self.cnt += n
    self.sum += val * n
    self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
  """Compute top-k accuracies (in percent) for the given logits and targets.

  Returns one 0-dim tensor per k in `topk`.

  Fix: uses `reshape(-1)` instead of `view(-1)` — `correct` inherits the
  transposed (non-contiguous) layout of `pred`, and `view` raises
  "view size is not compatible ..." on non-contiguous tensors in recent
  PyTorch; `reshape` handles both layouts with identical results.
  """
  maxk = max(topk)
  batch_size = target.size(0)
  # pred: (maxk, batch) — row r holds each sample's r-th best class.
  _, pred = output.topk(maxk, 1, True, True)
  pred = pred.t()
  correct = pred.eq(target.view(1, -1).expand_as(pred))
  res = []
  for k in topk:
    correct_k = correct[:k].reshape(-1).float().sum(0)
    res.append(correct_k.mul_(100.0/batch_size))
  return res
class Cutout(object):
  """Zero out one randomly-placed square patch of a CHW image tensor
  (Cutout augmentation, DeVries & Taylor)."""
  def __init__(self, length):
    self.length = length
  def __call__(self, img):
    h, w = img.size(1), img.size(2)
    # Pick the patch centre (same RNG draw order as the reference code),
    # then clip the square to the image bounds.
    cy = np.random.randint(h)
    cx = np.random.randint(w)
    half = self.length // 2
    y1 = np.clip(cy - half, 0, h)
    y2 = np.clip(cy + half, 0, h)
    x1 = np.clip(cx - half, 0, w)
    x2 = np.clip(cx + half, 0, w)
    mask = np.ones((h, w), np.float32)
    mask[y1: y2, x1: x2] = 0.
    # Broadcast the HxW mask over all channels and apply in place.
    img *= torch.from_numpy(mask).expand_as(img)
    return img
def _data_transforms_cifar10(args):
  """Return (train_transform, valid_transform) pipelines for CIFAR-10.

  Training uses random crop + horizontal flip + normalization, with Cutout
  appended when `args.cutout` is set; validation only normalizes.
  """
  CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
  CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
  normalize = transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
  train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize,
  ])
  if args.cutout:
    train_transform.transforms.append(Cutout(args.cutout_length))
  valid_transform = transforms.Compose([
    transforms.ToTensor(),
    normalize,
  ])
  return train_transform, valid_transform
def count_parameters_in_MB(model):
  """Return the number of parameters (in millions) of `model`, excluding
  any parameter whose name contains "auxiliary" (the auxiliary tower).

  Fix: `np.sum` over a Python generator relied on a deprecated fallback to
  the builtin `sum` (and warns / misbehaves on modern NumPy); use the
  builtin `sum` with `Tensor.numel()` directly — the value is identical.
  """
  return sum(v.numel() for name, v in model.named_parameters() if "auxiliary" not in name)/1e6
def save_checkpoint(state, is_best, save):
  """Write `state` to <save>/checkpoint.pth.tar; when `is_best`, also copy
  it to <save>/model_best.pth.tar."""
  ckpt_path = os.path.join(save, 'checkpoint.pth.tar')
  torch.save(state, ckpt_path)
  if is_best:
    shutil.copyfile(ckpt_path, os.path.join(save, 'model_best.pth.tar'))
def save(model, model_path):
  """Serialize only the model's weights (its state_dict) to `model_path`."""
  weights = model.state_dict()
  torch.save(weights, model_path)
def load(model, model_path):
  """Restore weights previously written by `save` into `model` (in place)."""
  state = torch.load(model_path)
  model.load_state_dict(state)
def drop_path(x, drop_prob):
  """Drop-path (stochastic depth): zero each sample in the batch with
  probability `drop_prob`, scaling survivors by 1/keep_prob so the
  expectation is preserved. Modifies `x` in place and returns it."""
  if drop_prob <= 0.:
    return x
  keep_prob = 1.-drop_prob
  # One Bernoulli draw per sample, broadcast over the C/H/W dimensions.
  mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
  x.div_(keep_prob)
  x.mul_(mask)
  return x
def create_exp_dir(path, scripts_to_save=None):
  """Create the experiment directory `path` (if missing) and optionally
  snapshot the given source scripts into <path>/scripts/."""
  if not os.path.exists(path):
    os.mkdir(path)
  print('Experiment dir : {}'.format(path))
  if scripts_to_save is not None:
    scripts_dir = os.path.join(path, 'scripts')
    os.mkdir(scripts_dir)
    for script in scripts_to_save:
      shutil.copyfile(script, os.path.join(scripts_dir, os.path.basename(script)))
| 3,080 | 24.254098 | 105 | py |
darts | darts-master/cnn/model.py | import torch
import torch.nn as nn
from operations import *
from torch.autograd import Variable
from utils import drop_path
class Cell(nn.Module):
  """A fixed (discretized) DARTS cell built from a genotype.

  Each of the `_steps` intermediate nodes sums the outputs of two ops
  applied to two earlier states; the cell output concatenates the states
  listed in the genotype's concat field.
  """
  def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
    super(Cell, self).__init__()
    print(C_prev_prev, C_prev, C)
    # Bring both input states to C channels; a factorized reduce is needed
    # when the previous cell halved the spatial resolution.
    if reduction_prev:
      self.preprocess0 = FactorizedReduce(C_prev_prev, C)
    else:
      self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
    # Reduction cells use the `reduce` op list; normal cells use `normal`.
    if reduction:
      op_names, indices = zip(*genotype.reduce)
      concat = genotype.reduce_concat
    else:
      op_names, indices = zip(*genotype.normal)
      concat = genotype.normal_concat
    self._compile(C, op_names, indices, concat, reduction)
  def _compile(self, C, op_names, indices, concat, reduction):
    # Two (op, input-index) pairs per intermediate node.
    assert len(op_names) == len(indices)
    self._steps = len(op_names) // 2
    self._concat = concat
    self.multiplier = len(concat)
    self._ops = nn.ModuleList()
    for name, index in zip(op_names, indices):
      # In reduction cells, ops fed by the two cell inputs use stride 2.
      stride = 2 if reduction and index < 2 else 1
      op = OPS[name](C, stride, True)
      self._ops += [op]
    self._indices = indices
  def forward(self, s0, s1, drop_prob):
    s0 = self.preprocess0(s0)
    s1 = self.preprocess1(s1)
    states = [s0, s1]
    for i in range(self._steps):
      # Node i sums its two op outputs, with drop-path on non-identity ops
      # during training.
      h1 = states[self._indices[2*i]]
      h2 = states[self._indices[2*i+1]]
      op1 = self._ops[2*i]
      op2 = self._ops[2*i+1]
      h1 = op1(h1)
      h2 = op2(h2)
      if self.training and drop_prob > 0.:
        if not isinstance(op1, Identity):
          h1 = drop_path(h1, drop_prob)
        if not isinstance(op2, Identity):
          h2 = drop_path(h2, drop_prob)
      s = h1 + h2
      states += [s]
    # Channel-wise concat of the genotype-selected states.
    return torch.cat([states[i] for i in self._concat], dim=1)
class AuxiliaryHeadCIFAR(nn.Module):
  """Auxiliary classifier attached to an intermediate 8x8 feature map
  (used only for the auxiliary loss during training)."""
  def __init__(self, C, num_classes):
    """assuming input size 8x8"""
    super(AuxiliaryHeadCIFAR, self).__init__()
    # 8x8 -> 2x2 (avg pool) -> 1x1 (final 2x2 conv), then a linear layer.
    self.features = nn.Sequential(
      nn.ReLU(inplace=True),
      nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),  # image size = 2 x 2
      nn.Conv2d(C, 128, 1, bias=False),
      nn.BatchNorm2d(128),
      nn.ReLU(inplace=True),
      nn.Conv2d(128, 768, 2, bias=False),
      nn.BatchNorm2d(768),
      nn.ReLU(inplace=True)
    )
    self.classifier = nn.Linear(768, num_classes)
  def forward(self, x):
    feats = self.features(x)
    flat = feats.view(feats.size(0), -1)
    return self.classifier(flat)
class AuxiliaryHeadImageNet(nn.Module):
  """Auxiliary classifier for the ImageNet network (training-time loss only)."""
  def __init__(self, C, num_classes):
    """assuming input size 14x14"""
    super(AuxiliaryHeadImageNet, self).__init__()
    self.features = nn.Sequential(
      nn.ReLU(inplace=True),
      nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
      nn.Conv2d(C, 128, 1, bias=False),
      nn.BatchNorm2d(128),
      nn.ReLU(inplace=True),
      nn.Conv2d(128, 768, 2, bias=False),
      # NOTE: BatchNorm2d(768) was omitted here in the original
      # implementation due to a typo; it stays omitted for consistency
      # with the published experiments.
      nn.ReLU(inplace=True)
    )
    self.classifier = nn.Linear(768, num_classes)
  def forward(self, x):
    feats = self.features(x)
    flat = feats.view(feats.size(0), -1)
    return self.classifier(flat)
class NetworkCIFAR(nn.Module):
  """Evaluation network for CIFAR: a stem followed by `layers` fixed cells
  built from `genotype`, with reductions at 1/3 and 2/3 depth, optional
  auxiliary head, global pooling and a linear classifier.

  Callers must set `self.drop_path_prob` before calling forward().
  """
  def __init__(self, C, num_classes, layers, auxiliary, genotype):
    super(NetworkCIFAR, self).__init__()
    self._layers = layers
    self._auxiliary = auxiliary
    # 3x3 conv stem expands RGB input to stem_multiplier*C channels.
    stem_multiplier = 3
    C_curr = stem_multiplier*C
    self.stem = nn.Sequential(
      nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
      nn.BatchNorm2d(C_curr)
    )
    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    reduction_prev = False
    for i in range(layers):
      # Double the channels at the two reduction cells (1/3 and 2/3 depth).
      if i in [layers//3, 2*layers//3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr
      # Remember the channel count where the auxiliary head attaches.
      if i == 2*layers//3:
        C_to_auxiliary = C_prev
    if auxiliary:
      self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C_prev, num_classes)
  def forward(self, input):
    # Returns (logits, logits_aux); logits_aux is None unless the auxiliary
    # head exists and the model is in training mode.
    logits_aux = None
    s0 = s1 = self.stem(input)
    for i, cell in enumerate(self.cells):
      s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
      if i == 2*self._layers//3:
        if self._auxiliary and self.training:
          logits_aux = self.auxiliary_head(s1)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0),-1))
    return logits, logits_aux
class NetworkImageNet(nn.Module):
  """Evaluation network for ImageNet: two downsampling stems (overall /8),
  `layers` fixed cells with reductions at 1/3 and 2/3 depth, optional
  auxiliary head, 7x7 average pooling and a linear classifier.

  Callers must set `self.drop_path_prob` before calling forward().
  """
  def __init__(self, C, num_classes, layers, auxiliary, genotype):
    super(NetworkImageNet, self).__init__()
    self._layers = layers
    self._auxiliary = auxiliary
    # stem0 downsamples twice (stride-2 convs); stem1 once more.
    self.stem0 = nn.Sequential(
      nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False),
      nn.BatchNorm2d(C // 2),
      nn.ReLU(inplace=True),
      nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
      nn.BatchNorm2d(C),
    )
    self.stem1 = nn.Sequential(
      nn.ReLU(inplace=True),
      nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
      nn.BatchNorm2d(C),
    )
    C_prev_prev, C_prev, C_curr = C, C, C
    self.cells = nn.ModuleList()
    # stem1 already reduced resolution, so the first cell sees a reduced s0.
    reduction_prev = True
    for i in range(layers):
      # Double the channels at the two reduction cells (1/3 and 2/3 depth).
      if i in [layers // 3, 2 * layers // 3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr
      # Remember the channel count where the auxiliary head attaches.
      if i == 2 * layers // 3:
        C_to_auxiliary = C_prev
    if auxiliary:
      self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
    self.global_pooling = nn.AvgPool2d(7)
    self.classifier = nn.Linear(C_prev, num_classes)
  def forward(self, input):
    # Returns (logits, logits_aux); logits_aux is None unless the auxiliary
    # head exists and the model is in training mode.
    logits_aux = None
    s0 = self.stem0(input)
    s1 = self.stem1(s0)
    for i, cell in enumerate(self.cells):
      s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
      if i == 2 * self._layers // 3:
        if self._auxiliary and self.training:
          logits_aux = self.auxiliary_head(s1)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0), -1))
    return logits, logits_aux
| 6,640 | 29.888372 | 89 | py |
darts | darts-master/cnn/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import *
from torch.autograd import Variable
from genotypes import PRIMITIVES
from genotypes import Genotype
class MixedOp(nn.Module):
  """One search edge: the weighted sum over every candidate operation.

  `weights` in forward() is assumed to be one softmax weight per primitive,
  in PRIMITIVES order.
  """
  def __init__(self, C, stride):
    super(MixedOp, self).__init__()
    self._ops = nn.ModuleList()
    for primitive in PRIMITIVES:
      candidate = OPS[primitive](C, stride, False)
      # Pooling ops get a non-affine BatchNorm so their output scale is
      # comparable to the convolutional candidates.
      if 'pool' in primitive:
        candidate = nn.Sequential(candidate, nn.BatchNorm2d(C, affine=False))
      self._ops.append(candidate)
  def forward(self, x, weights):
    acc = None
    for w, op in zip(weights, self._ops):
      term = w * op(x)
      acc = term if acc is None else acc + term
    return acc
class Cell(nn.Module):
  """One searchable cell: a DAG of `steps` intermediate nodes where every
  edge is a MixedOp weighted by the architecture parameters."""
  def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
    super(Cell, self).__init__()
    self.reduction = reduction
    # Bring both input states to C channels; a factorized reduce is needed
    # when the previous cell halved the spatial resolution.
    if reduction_prev:
      self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
    else:
      self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
    self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
    self._steps = steps
    self._multiplier = multiplier
    self._ops = nn.ModuleList()
    self._bns = nn.ModuleList()  # unused in forward; kept as in the original
    # One MixedOp per edge, ordered node-by-node: node i has 2+i incoming
    # edges (the two cell inputs plus all earlier nodes).
    for node in range(self._steps):
      for pred in range(2 + node):
        edge_stride = 2 if reduction and pred < 2 else 1
        self._ops.append(MixedOp(C, edge_stride))
  def forward(self, s0, s1, weights):
    s0 = self.preprocess0(s0)
    s1 = self.preprocess1(s1)
    states = [s0, s1]
    offset = 0
    for _ in range(self._steps):
      # Each new node sums all of its incoming mixed edges.
      nxt = sum(self._ops[offset + j](h, weights[offset + j]) for j, h in enumerate(states))
      offset += len(states)
      states.append(nxt)
    # Concatenate the last `multiplier` intermediate nodes as the output.
    return torch.cat(states[-self._multiplier:], dim=1)
class Network(nn.Module):
  """DARTS search network: a stack of searchable cells whose edges are
  weighted by two learnable alpha matrices (normal / reduce), plus helpers
  to clone the network and to discretize the alphas into a Genotype."""
  def __init__(self, C, num_classes, layers, criterion, steps=4, multiplier=4, stem_multiplier=3):
    super(Network, self).__init__()
    self._C = C
    self._num_classes = num_classes
    self._layers = layers
    self._criterion = criterion
    self._steps = steps
    self._multiplier = multiplier
    # 3x3 conv stem expands RGB input to stem_multiplier*C channels.
    C_curr = stem_multiplier*C
    self.stem = nn.Sequential(
      nn.Conv2d(3, C_curr, 3, padding=1, bias=False),
      nn.BatchNorm2d(C_curr)
    )
    C_prev_prev, C_prev, C_curr = C_curr, C_curr, C
    self.cells = nn.ModuleList()
    reduction_prev = False
    for i in range(layers):
      # Double the channels at the two reduction cells (1/3 and 2/3 depth).
      if i in [layers//3, 2*layers//3]:
        C_curr *= 2
        reduction = True
      else:
        reduction = False
      cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
      reduction_prev = reduction
      self.cells += [cell]
      C_prev_prev, C_prev = C_prev, multiplier*C_curr
    self.global_pooling = nn.AdaptiveAvgPool2d(1)
    self.classifier = nn.Linear(C_prev, num_classes)
    self._initialize_alphas()
  def new(self):
    # Fresh network with freshly-initialized weights but copied alphas
    # (used by the Architect's unrolled step).
    model_new = Network(self._C, self._num_classes, self._layers, self._criterion).cuda()
    for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
        x.data.copy_(y.data)
    return model_new
  def forward(self, input):
    s0 = s1 = self.stem(input)
    for i, cell in enumerate(self.cells):
      # Each cell mixes its edges with the softmaxed alphas of its kind.
      if cell.reduction:
        weights = F.softmax(self.alphas_reduce, dim=-1)
      else:
        weights = F.softmax(self.alphas_normal, dim=-1)
      s0, s1 = s1, cell(s0, s1, weights)
    out = self.global_pooling(s1)
    logits = self.classifier(out.view(out.size(0),-1))
    return logits
  def _loss(self, input, target):
    # Convenience: criterion applied to this network's own forward pass.
    logits = self(input)
    return self._criterion(logits, target)
  def _initialize_alphas(self):
    # One (num_edges x num_ops) alpha matrix per cell type, initialized
    # near zero so the initial mixture is close to uniform.
    k = sum(1 for i in range(self._steps) for n in range(2+i))
    num_ops = len(PRIMITIVES)
    self.alphas_normal = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self.alphas_reduce = Variable(1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
    self._arch_parameters = [
      self.alphas_normal,
      self.alphas_reduce,
    ]
  def arch_parameters(self):
    # The architecture (alpha) parameters, as optimized by the Architect.
    return self._arch_parameters
  def genotype(self):
    """Discretize the current alphas into a Genotype: per node keep the two
    strongest incoming edges (ignoring 'none'), each with its best op."""
    def _parse(weights):
      gene = []
      n = 2
      start = 0
      for i in range(self._steps):
        end = start + n
        W = weights[start:end].copy()
        # Rank this node's incoming edges by their best non-'none' weight
        # and keep the top two.
        edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
        for j in edges:
          # Pick the strongest non-'none' op on the chosen edge.
          k_best = None
          for k in range(len(W[j])):
            if k != PRIMITIVES.index('none'):
              if k_best is None or W[j][k] > W[j][k_best]:
                k_best = k
          gene.append((PRIMITIVES[k_best], j))
        start = end
        n += 1
      return gene
    gene_normal = _parse(F.softmax(self.alphas_normal, dim=-1).data.cpu().numpy())
    gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=-1).data.cpu().numpy())
    # Concatenate the last `multiplier` nodes, as in the fixed cells.
    concat = range(2+self._steps-self._multiplier, self._steps+2)
    genotype = Genotype(
      normal=gene_normal, normal_concat=concat,
      reduce=gene_reduce, reduce_concat=concat
    )
    return genotype
| 5,009 | 29.54878 | 128 | py |
darts | darts-master/cnn/train_search.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model_search import Network
from architect import Architect
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
args = parser.parse_args()
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
CIFAR_CLASSES = 10
def main():
    """Entry point: build the CIFAR supernet and run the bilevel search loop."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion)
    model = model.cuda()
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)

    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)

    # Split the official training set into search-train and search-valid halves;
    # the search-valid half drives the architecture (alpha) updates.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=2)

    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, float(args.epochs), eta_min=args.learning_rate_min)

    architect = Architect(model, args)

    for epoch in range(args.epochs):
        scheduler.step()
        lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)

        # Log the architecture the current alphas would discretize to.
        genotype = model.genotype()
        logging.info('genotype = %s', genotype)

        print(F.softmax(model.alphas_normal, dim=-1))
        print(F.softmax(model.alphas_reduce, dim=-1))

        # training
        train_acc, train_obj = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr)
        logging.info('train_acc %f', train_acc)

        # validation
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr):
    """Run one epoch of architecture search training.

    Alternates an architecture (alpha) step on a validation batch with a
    weight step on a training batch. Returns (top1_avg, loss_avg).
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()

    for step, (input, target) in enumerate(train_queue):
        model.train()
        n = input.size(0)

        # `.cuda(async=True)` is a SyntaxError on Python >= 3.7 (`async` became
        # a keyword); `non_blocking` is the supported spelling.
        input = Variable(input, requires_grad=False).cuda()
        target = Variable(target, requires_grad=False).cuda(non_blocking=True)

        # get a random minibatch from the search queue with replacement
        # (note: a fresh iterator per step is slow but matches the original intent)
        input_search, target_search = next(iter(valid_queue))
        input_search = Variable(input_search, requires_grad=False).cuda()
        target_search = Variable(target_search, requires_grad=False).cuda(non_blocking=True)

        # Bilevel step on the architecture parameters.
        architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)

        optimizer.zero_grad()
        logits = model(input)
        loss = criterion(logits, target)

        loss.backward()
        # clip_grad_norm was renamed clip_grad_norm_ (in-place) in torch 0.4.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        # .item() replaces 0-dim indexing `tensor.data[0]`, which raises on
        # modern PyTorch.
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)

        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate `model` on valid_queue; returns (top1_avg, loss_avg)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    # torch.no_grad() replaces the removed `volatile=True` flag; `async=True`
    # became a SyntaxError when Python 3.7 made `async` a keyword.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits = model(input)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            # .item() replaces 0-dim `tensor.data[0]`, which raises on modern PyTorch.
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 7,212 | 35.80102 | 115 | py |
darts | darts-master/cnn/test_imagenet.py | import os
import sys
import numpy as np
import torch
import utils
import glob
import random
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkImageNet as Network
parser = argparse.ArgumentParser("imagenet")
parser.add_argument('--data', type=str, default='../data/imagenet/', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--report_freq', type=float, default=100, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=48, help='num of init channels')
parser.add_argument('--layers', type=int, default=14, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--drop_path_prob', type=float, default=0, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
CLASSES = 1000
def main():
    """Load a pretrained DARTS ImageNet model and report validation accuracy."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # The cell structure is looked up by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    model.load_state_dict(torch.load(args.model_path)['state_dict'])

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    # Standard ImageNet eval preprocessing: resize-256, center-crop-224, normalize.
    validdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_data = dset.ImageFolder(
        validdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)

    model.drop_path_prob = args.drop_path_prob
    valid_acc_top1, valid_acc_top5, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc_top1 %f', valid_acc_top1)
    logging.info('valid_acc_top5 %f', valid_acc_top5)
def infer(valid_queue, model, criterion):
    """Evaluate on the ImageNet validation queue.

    Returns (top1_avg, top5_avg, loss_avg). The model returns
    (logits, aux_logits); only the main head is scored.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    # torch.no_grad() replaces the removed `volatile=True` flag; `async=True`
    # became a SyntaxError when Python 3.7 made `async` a keyword.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits, _ = model(input)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            # .item() replaces 0-dim `tensor.data[0]`, which raises on modern PyTorch.
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

    return top1.avg, top5.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 3,785 | 32.504425 | 104 | py |
darts | darts-master/cnn/train.py | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model import NetworkCIFAR as Network
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
args = parser.parse_args()
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
CIFAR_CLASSES = 10
def main():
    """Entry point: train the discrete genotype network from scratch on CIFAR-10."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # The cell structure is looked up by name from genotypes.py.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    optimizer = torch.optim.SGD(
        model.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )

    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Drop-path probability is linearly annealed over training.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))
def train(train_queue, model, criterion, optimizer):
    """One training epoch of the discrete network; returns (top1_avg, loss_avg)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()

    for step, (input, target) in enumerate(train_queue):
        # `.cuda(async=True)` is a SyntaxError on Python >= 3.7 (`async` became
        # a keyword); `non_blocking` is the supported spelling.
        input = Variable(input).cuda()
        target = Variable(target).cuda(non_blocking=True)

        optimizer.zero_grad()
        logits, logits_aux = model(input)
        loss = criterion(logits, target)
        if args.auxiliary:
            # Deep-supervision head, blended into the main loss.
            loss_aux = criterion(logits_aux, target)
            loss += args.auxiliary_weight*loss_aux
        loss.backward()
        # clip_grad_norm was renamed clip_grad_norm_ (in-place) in torch 0.4.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()

        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        n = input.size(0)
        # .item() replaces 0-dim `tensor.data[0]`, which raises on modern PyTorch.
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)

        if step % args.report_freq == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate the discrete network; returns (top1_avg, loss_avg)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()

    # torch.no_grad() replaces the removed `volatile=True` flag; `async=True`
    # became a SyntaxError when Python 3.7 made `async` a keyword.
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            logits, _ = model(input)
            loss = criterion(logits, target)

            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            # .item() replaces 0-dim `tensor.data[0]`, which raises on modern PyTorch.
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)

            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

    return top1.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| 6,251 | 35.561404 | 100 | py |
darts | darts-master/cnn/operations.py | import torch
import torch.nn as nn
# Candidate operations of the DARTS search space, keyed by primitive name.
# Each value is a factory (C, stride, affine) -> nn.Module with C input and
# C output channels; `none` is the zero op, `skip_connect` is identity (or a
# factorized reduction when the edge strides).
OPS = {
    'none' : lambda C, stride, affine: Zero(stride),
    'avg_pool_3x3' : lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False),
    'max_pool_3x3' : lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1),
    'skip_connect' : lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),
    'sep_conv_3x3' : lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine),
    'sep_conv_5x5' : lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine),
    'sep_conv_7x7' : lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine),
    'dil_conv_3x3' : lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine),
    'dil_conv_5x5' : lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine),
    'conv_7x1_1x7' : lambda C, stride, affine: nn.Sequential(
        nn.ReLU(inplace=False),
        nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False),
        nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False),
        nn.BatchNorm2d(C, affine=affine)
    ),
}
class ReLUConvBN(nn.Module):
    """ReLU -> Conv2d -> BatchNorm block used throughout the DARTS search space."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(ReLUConvBN, self).__init__()
        layers = [
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine),
        ]
        self.op = nn.Sequential(*layers)

    def forward(self, x):
        return self.op(x)
class DilConv(nn.Module):
    """ReLU -> dilated depthwise conv -> 1x1 pointwise conv -> BatchNorm."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
        super(DilConv, self).__init__()
        depthwise = nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, groups=C_in, bias=False)
        pointwise = nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False)
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            depthwise,
            pointwise,
            nn.BatchNorm2d(C_out, affine=affine),
        )

    def forward(self, x):
        return self.op(x)
class SepConv(nn.Module):
    """Two stacked depthwise-separable convolutions; only the first strides."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        super(SepConv, self).__init__()

        def _unit(cin, cout, s):
            # One ReLU -> depthwise -> pointwise -> BN stage.
            return [
                nn.ReLU(inplace=False),
                nn.Conv2d(cin, cin, kernel_size=kernel_size, stride=s, padding=padding, groups=cin, bias=False),
                nn.Conv2d(cin, cout, kernel_size=1, padding=0, bias=False),
                nn.BatchNorm2d(cout, affine=affine),
            ]

        self.op = nn.Sequential(*(_unit(C_in, C_in, stride) + _unit(C_in, C_out, 1)))

    def forward(self, x):
        return self.op(x)
class Identity(nn.Module):
    """Pass-through op: the stride-1 form of `skip_connect`."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class Zero(nn.Module):
    """The `none` op: all-zero output, spatially subsampled by `stride`."""

    def __init__(self, stride):
        super(Zero, self).__init__()
        self.stride = stride

    def forward(self, x):
        # Subsample first (when striding) so the zero output has the shape a
        # strided op would produce.
        out = x if self.stride == 1 else x[:, :, ::self.stride, ::self.stride]
        return out.mul(0.)
class FactorizedReduce(nn.Module):
    """Halve spatial resolution while mapping C_in -> C_out via two offset 1x1 convs."""

    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.relu = nn.ReLU(inplace=False)
        # Each branch yields half the output channels; the second branch is
        # shifted one pixel so the pair covers every input position.
        self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        activated = self.relu(x)
        halves = [self.conv_1(activated), self.conv_2(activated[:, :, 1:, 1:])]
        return self.bn(torch.cat(halves, dim=1))
| 3,717 | 34.075472 | 129 | py |
darts | darts-master/rnn/test.py | import argparse
import os, sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import data
import model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
# Command-line configuration for evaluating a trained DARTS RNN language model.
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
                    help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=850,
                    help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=850,
                    help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=850,
                    help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
                    help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
                    help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                    help='batch size')
parser.add_argument('--bptt', type=int, default=35,
                    help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
                    help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.3,
                    help='dropout for rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
                    help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.2,
                    help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=1267,
                    help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
                    help='random seed')
parser.add_argument('--cuda', action='store_false',
                    help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
                    help='report interval')
parser.add_argument('--model_path', type=str, default='EXP/model.pt',
                    help='path to load the pretrained model')
parser.add_argument('--alpha', type=float, default=0,
                    help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
                    help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
                    help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
                    help='continue train from a checkpoint')
parser.add_argument('--n_experts', type=int, default=1,
                    help='number of experts')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
                    help='max sequence length')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
args = parser.parse_args()
def logging(s, print_=True, log_=True):
    # Minimal logger used by this script. The print_/log_ flags are accepted
    # for interface compatibility, but only stdout printing is implemented here.
    print(s)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.set_device(args.gpu)
        cudnn.benchmark = True
        cudnn.enabled=True
        torch.cuda.manual_seed_all(args.seed)

# Load the corpus and lay the test split out as evaluation batches
# (batch size 1 for exact perplexity).
corpus = data.Corpus(args.data)

test_batch_size = 1
test_data = batchify(corpus.test, test_batch_size, args)
def evaluate(data_source, batch_size=10):
    """Return the average per-token NLL of the loaded model over `data_source`.

    Uses the module-level `model`, `parallel_model`, `corpus` and `args`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        print(i, data_source.size(0)-1)
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        # Length-weighted sum so uneven final chunks are averaged correctly.
        total_loss += loss * len(data)

        # Detach the hidden state so memory stays bounded across chunks.
        hidden = repackage_hidden(hidden)
    return total_loss[0] / len(data_source)
# Load the best saved model.
model = torch.load(args.model_path)
total_params = sum(x.data.nelement() for x in model.parameters())
logging('Args: {}'.format(args))
logging('Model total parameters: {}'.format(total_params))

parallel_model = model.cuda()

# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
logging('=' * 89)
logging('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)))
logging('=' * 89)
| 5,048 | 40.385246 | 118 | py |
darts | darts-master/rnn/architect.py | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def _clip(grads, max_norm):
total_norm = 0
for g in grads:
param_norm = g.data.norm(2)
total_norm += param_norm ** 2
total_norm = total_norm ** 0.5
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grads:
g.data.mul_(clip_coef)
return clip_coef
class Architect(object):
    """Performs the architecture-parameter (alpha) updates of DARTS for RNNs.

    Wraps the supernet plus an Adam optimizer over the architecture
    parameters; supports both the first-order approximation and the one-step
    unrolled (second-order) gradient.
    """

    def __init__(self, model, args):
        self.network_weight_decay = args.wdecay
        self.network_clip = args.clip
        self.model = model
        self.optimizer = torch.optim.Adam(self.model.arch_parameters(), lr=args.arch_lr, weight_decay=args.arch_wdecay)

    def _compute_unrolled_model(self, hidden, input, target, eta):
        # Build w' = w - eta * (dL_train/dw + wd * w), applying the same
        # gradient clipping the network optimizer would apply.
        loss, hidden_next = self.model._loss(hidden, input, target)
        theta = _concat(self.model.parameters()).data
        grads = torch.autograd.grad(loss, self.model.parameters())
        clip_coef = _clip(grads, self.network_clip)
        dtheta = _concat(grads).data + self.network_weight_decay*theta
        unrolled_model = self._construct_model_from_theta(theta.sub(eta, dtheta))
        return unrolled_model, clip_coef

    def step(self,
            hidden_train, input_train, target_train,
            hidden_valid, input_valid, target_valid,
            network_optimizer, unrolled):
        """One alpha update; returns (next hidden state, None)."""
        eta = network_optimizer.param_groups[0]['lr']
        self.optimizer.zero_grad()
        if unrolled:
            hidden = self._backward_step_unrolled(hidden_train, input_train, target_train, hidden_valid, input_valid, target_valid, eta)
        else:
            hidden = self._backward_step(hidden_valid, input_valid, target_valid)
        self.optimizer.step()
        return hidden, None

    def _backward_step(self, hidden, input, target):
        # First-order DARTS: plain validation-loss gradient w.r.t. alphas.
        loss, hidden_next = self.model._loss(hidden, input, target)
        loss.backward()
        return hidden_next

    def _backward_step_unrolled(self,
            hidden_train, input_train, target_train,
            hidden_valid, input_valid, target_valid, eta):
        # Second-order DARTS: differentiate the validation loss of the
        # one-step-unrolled model, then subtract the finite-difference
        # Hessian-vector correction term.
        unrolled_model, clip_coef = self._compute_unrolled_model(hidden_train, input_train, target_train, eta)
        unrolled_loss, hidden_next = unrolled_model._loss(hidden_valid, input_valid, target_valid)

        unrolled_loss.backward()
        dalpha = [v.grad for v in unrolled_model.arch_parameters()]
        dtheta = [v.grad for v in unrolled_model.parameters()]
        _clip(dtheta, self.network_clip)
        vector = [dt.data for dt in dtheta]
        implicit_grads = self._hessian_vector_product(vector, hidden_train, input_train, target_train, r=1e-2)

        for g, ig in zip(dalpha, implicit_grads):
            g.data.sub_(eta * clip_coef, ig.data)

        # Copy the computed gradients back onto the live model's alphas so
        # self.optimizer can apply them.
        for v, g in zip(self.model.arch_parameters(), dalpha):
            if v.grad is None:
                v.grad = Variable(g.data)
            else:
                v.grad.data.copy_(g.data)
        return hidden_next

    def _construct_model_from_theta(self, theta):
        # Clone the model and load the flattened parameter vector `theta`
        # back into its (shape-preserving) state dict.
        model_new = self.model.new()
        model_dict = self.model.state_dict()

        params, offset = {}, 0
        for k, v in self.model.named_parameters():
            v_length = np.prod(v.size())
            params[k] = theta[offset: offset+v_length].view(v.size())
            offset += v_length

        assert offset == len(theta)
        model_dict.update(params)
        model_new.load_state_dict(model_dict)
        return model_new.cuda()

    def _hessian_vector_product(self, vector, hidden, input, target, r=1e-2):
        # Finite-difference approximation of (d^2 L / dw dalpha) . vector:
        # evaluate the alpha-gradient at w + R*v and w - R*v, then restore w.
        R = r / _concat(vector).norm()
        for p, v in zip(self.model.parameters(), vector):
            p.data.add_(R, v)
        loss, _ = self.model._loss(hidden, input, target)
        grads_p = torch.autograd.grad(loss, self.model.arch_parameters())

        for p, v in zip(self.model.parameters(), vector):
            p.data.sub_(2*R, v)
        loss, _ = self.model._loss(hidden, input, target)
        grads_n = torch.autograd.grad(loss, self.model.arch_parameters())

        for p, v in zip(self.model.parameters(), vector):
            p.data.add_(R, v)

        return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
| 4,003 | 34.122807 | 132 | py |
darts | darts-master/rnn/utils.py | import torch
import torch.nn as nn
import os, shutil
import numpy as np
from torch.autograd import Variable
def repackage_hidden(h):
    """Detach hidden state(s) from their autograd history (truncated BPTT).

    Accepts a tensor or an arbitrarily nested tuple of tensors and returns
    the same structure with gradients cut at this point.
    """
    # The original `type(h) == Variable` check never matches on PyTorch >= 0.4,
    # where Variable(...) returns a plain Tensor, so tensors fell through to
    # the tuple branch and were iterated element-wise. isinstance against
    # torch.Tensor covers both the old and new API.
    if isinstance(h, torch.Tensor):
        return h.detach()
    else:
        return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz, args):
    """Trim `data` to a multiple of bsz and reshape it into bsz columns.

    Returns a (nbatch, bsz) tensor, moved to GPU when args.cuda is set.
    """
    nbatch = data.size(0) // bsz
    trimmed = data.narrow(0, 0, nbatch * bsz)
    batched = trimmed.view(bsz, -1).t().contiguous()
    print(batched.size())
    if args.cuda:
        batched = batched.cuda()
    return batched
def get_batch(source, i, args, seq_len=None, evaluation=False):
    """Slice a (data, target) BPTT chunk starting at row i of `source`.

    The target is the input shifted one time step; the chunk shrinks near the
    end of the corpus.
    NOTE(review): the `volatile` keyword was removed from modern PyTorch --
    this call only works on the old (<=0.3) Variable API; confirm the torch
    version this code targets.
    """
    seq_len = min(seq_len if seq_len else args.bptt, len(source) - 1 - i)
    data = Variable(source[i:i+seq_len], volatile=evaluation)
    target = Variable(source[i+1:i+1+seq_len])
    return data, target
def create_exp_dir(path, scripts_to_save=None):
    """Create an experiment directory; optionally snapshot scripts into it."""
    if not os.path.exists(path):
        os.mkdir(path)
    print('Experiment dir : {}'.format(path))

    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        os.mkdir(scripts_dir)
        for script in scripts_to_save:
            shutil.copyfile(script, os.path.join(scripts_dir, os.path.basename(script)))
def save_checkpoint(model, optimizer, epoch, path, finetune=False):
    """Persist model, optimizer state and next-epoch counter under `path`.

    Finetuning runs use distinct file names so they do not clobber the
    original training checkpoint; misc.pt stores the epoch to resume from.
    """
    if finetune:
        torch.save(model, os.path.join(path, 'finetune_model.pt'))
        torch.save(optimizer.state_dict(), os.path.join(path, 'finetune_optimizer.pt'))
    else:
        torch.save(model, os.path.join(path, 'model.pt'))
        torch.save(optimizer.state_dict(), os.path.join(path, 'optimizer.pt'))
    torch.save({'epoch': epoch+1}, os.path.join(path, 'misc.pt'))
def embedded_dropout(embed, words, dropout=0.1, scale=None):
    """Embedding lookup with whole-word dropout on the embedding matrix.

    Entire rows of the weight matrix are zeroed with probability `dropout`
    (the rest rescaled by 1/(1-dropout)), so a dropped word is dropped at
    every position in the batch.
    """
    if dropout:
        mask = embed.weight.data.new().resize_((embed.weight.size(0), 1)).bernoulli_(1 - dropout).expand_as(embed.weight) / (1 - dropout)
        mask = Variable(mask)
        masked_embed_weight = mask * embed.weight
    else:
        masked_embed_weight = embed.weight
    if scale:
        masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight

    padding_idx = embed.padding_idx
    if padding_idx is None:
        padding_idx = -1

    # NOTE(review): `embed._backend` is a private hook that was removed in
    # later PyTorch releases; torch.nn.functional.embedding is the modern
    # equivalent -- confirm the targeted torch version before porting.
    X = embed._backend.Embedding.apply(words, masked_embed_weight,
        padding_idx, embed.max_norm, embed.norm_type,
        embed.scale_grad_by_freq, embed.sparse
    )
    return X
class LockedDropout(nn.Module):
    """Variational ("locked") dropout for sequences.

    Samples ONE dropout mask per (batch, feature) position and reuses it at
    every time step, instead of resampling per step as standard dropout does.
    Identity in eval mode or when ``dropout`` is falsy.
    """

    def __init__(self):
        super(LockedDropout, self).__init__()

    def forward(self, x, dropout=0.5):
        # Short-circuit: evaluation mode or dropout disabled -> pass through.
        if not self.training or not dropout:
            return x
        keep = 1 - dropout
        # Mask shape (1, B, D): broadcast over the time dimension (dim 0).
        noise = x.data.new(1, x.size(1), x.size(2)).bernoulli_(keep)
        mask = Variable(noise.div_(keep), requires_grad=False)
        return x * mask.expand_as(x)
def mask2d(B, D, keep_prob, cuda=True):
    """Sample a (B, D) inverted-dropout mask.

    Entries are ``1/keep_prob`` with probability *keep_prob* and ``0``
    otherwise, so the masked activations keep their expectation. The mask is
    excluded from autograd and moved to GPU when *cuda* is true.
    """
    bernoulli = torch.floor(torch.rand(B, D) + keep_prob)
    mask = Variable(bernoulli / keep_prob, requires_grad=False)
    return mask.cuda() if cuda else mask
| 2,955 | 30.446809 | 137 | py |
darts | darts-master/rnn/model.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from genotypes import STEPS
from utils import mask2d
from utils import LockedDropout
from utils import embedded_dropout
from torch.autograd import Variable
INITRANGE = 0.04
class DARTSCell(nn.Module):
    """Recurrent cell whose internal wiring is given by a DARTS genotype.

    Each intermediate state is computed from one predecessor state through a
    sigmoid-gated activation chosen by ``genotype.recurrent``; the cell output
    is the mean of the states listed in ``genotype.concat``.
    """

    def __init__(self, ninp, nhid, dropouth, dropoutx, genotype):
        # ninp/nhid: input and hidden sizes; dropouth/dropoutx: variational
        # dropout rates on hidden-to-hidden and input-to-hidden connections.
        super(DARTSCell, self).__init__()
        self.nhid = nhid
        self.dropouth = dropouth
        self.dropoutx = dropoutx
        self.genotype = genotype

        # genotype is None when doing arch search
        steps = len(self.genotype.recurrent) if self.genotype is not None else STEPS
        # _W0 mixes [x; h_prev] into the initial gate/candidate pair;
        # _Ws[i] produces the gate/candidate pair for intermediate step i.
        self._W0 = nn.Parameter(torch.Tensor(ninp+nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE))
        self._Ws = nn.ParameterList([
            nn.Parameter(torch.Tensor(nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE)) for i in range(steps)
        ])

    def forward(self, inputs, hidden):
        """Unroll the cell over time.

        inputs: (T, B, ninp); hidden: tensor whose leading dim is the layer
        dim (indexed away below). Returns (per-step states, final state).
        """
        T, B = inputs.size(0), inputs.size(1)

        if self.training:
            # Variational dropout: one mask per sequence, reused every step.
            x_mask = mask2d(B, inputs.size(2), keep_prob=1.-self.dropoutx)
            h_mask = mask2d(B, hidden.size(2), keep_prob=1.-self.dropouth)
        else:
            x_mask = h_mask = None

        hidden = hidden[0]
        hiddens = []
        for t in range(T):
            hidden = self.cell(inputs[t], hidden, x_mask, h_mask)
            hiddens.append(hidden)
        hiddens = torch.stack(hiddens)
        # Final state gets its layer dimension back for the next call.
        return hiddens, hiddens[-1].unsqueeze(0)

    def _compute_init_state(self, x, h_prev, x_mask, h_mask):
        # Highway-style gated mix of the previous state and a fresh candidate.
        if self.training:
            xh_prev = torch.cat([x * x_mask, h_prev * h_mask], dim=-1)
        else:
            xh_prev = torch.cat([x, h_prev], dim=-1)
        c0, h0 = torch.split(xh_prev.mm(self._W0), self.nhid, dim=-1)
        c0 = c0.sigmoid()
        h0 = h0.tanh()
        s0 = h_prev + c0 * (h0-h_prev)
        return s0

    def _get_activation(self, name):
        # Map a primitive name from the genotype to its activation function.
        if name == 'tanh':
            f = F.tanh
        elif name == 'relu':
            f = F.relu
        elif name == 'sigmoid':
            f = F.sigmoid
        elif name == 'identity':
            f = lambda x: x
        else:
            raise NotImplementedError
        return f

    def cell(self, x, h_prev, x_mask, h_mask):
        """One time step: build s0, then expand the genotype's DAG of states."""
        s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)

        states = [s0]
        for i, (name, pred) in enumerate(self.genotype.recurrent):
            s_prev = states[pred]
            if self.training:
                # The same h_mask is applied on every internal edge.
                ch = (s_prev * h_mask).mm(self._Ws[i])
            else:
                ch = s_prev.mm(self._Ws[i])
            c, h = torch.split(ch, self.nhid, dim=-1)
            c = c.sigmoid()
            fn = self._get_activation(name)
            h = fn(h)
            # Gated residual update from the chosen predecessor state.
            s = s_prev + c * (h-s_prev)
            states += [s]
        # Output: mean over the states selected by the genotype's concat list.
        output = torch.mean(torch.stack([states[i] for i in self.genotype.concat], -1), -1)
        return output
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, ntoken, ninp, nhid, nhidlast,
                 dropout=0.5, dropouth=0.5, dropoutx=0.5, dropouti=0.5, dropoute=0.1,
                 cell_cls=DARTSCell, genotype=None):
        super(RNNModel, self).__init__()
        self.lockdrop = LockedDropout()
        self.encoder = nn.Embedding(ntoken, ninp)

        # Weight tying below (decoder.weight = encoder.weight) requires all
        # three sizes to agree.
        assert ninp == nhid == nhidlast
        if cell_cls == DARTSCell:
            # A fixed (derived) architecture must be supplied.
            assert genotype is not None
            self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx, genotype)]
        else:
            # Search cells (e.g. DARTSCellSearch) own their architecture.
            assert genotype is None
            self.rnns = [cell_cls(ninp, nhid, dropouth, dropoutx)]

        self.rnns = torch.nn.ModuleList(self.rnns)
        self.decoder = nn.Linear(ninp, ntoken)
        # Tie input and output embeddings.
        self.decoder.weight = self.encoder.weight
        self.init_weights()

        self.ninp = ninp
        self.nhid = nhid
        self.nhidlast = nhidlast
        self.dropout = dropout
        self.dropouti = dropouti
        self.dropoute = dropoute
        self.ntoken = ntoken
        self.cell_cls = cell_cls

    def init_weights(self):
        # Small uniform init for tied embeddings; zero decoder bias.
        self.encoder.weight.data.uniform_(-INITRANGE, INITRANGE)
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-INITRANGE, INITRANGE)

    def forward(self, input, hidden, return_h=False):
        """Run the model over *input* (time-major) with per-layer *hidden*.

        Returns log-probabilities reshaped to (T, B, ntoken) and the new
        hidden states; with ``return_h=True`` also returns the per-layer raw
        and dropout-applied outputs (consumed by the activation regularizers
        in the training scripts).
        """
        batch_size = input.size(1)

        # Word-level embedding dropout only while training.
        emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
        emb = self.lockdrop(emb, self.dropouti)

        raw_output = emb
        new_hidden = []
        raw_outputs = []
        outputs = []
        for l, rnn in enumerate(self.rnns):
            current_input = raw_output
            raw_output, new_h = rnn(raw_output, hidden[l])
            new_hidden.append(new_h)
            raw_outputs.append(raw_output)
        hidden = new_hidden

        output = self.lockdrop(raw_output, self.dropout)
        outputs.append(output)

        logit = self.decoder(output.view(-1, self.ninp))
        log_prob = nn.functional.log_softmax(logit, dim=-1)
        model_output = log_prob
        model_output = model_output.view(-1, batch_size, self.ntoken)

        if return_h:
            return model_output, hidden, raw_outputs, outputs
        return model_output, hidden

    def init_hidden(self, bsz):
        # One zero state per layer (single layer here), shape (1, bsz, nhid);
        # built from an existing parameter to match its dtype/device.
        weight = next(self.parameters()).data
        return [Variable(weight.new(1, bsz, self.nhid).zero_())]
| 5,148 | 30.981366 | 102 | py |
darts | darts-master/rnn/data.py | import os
import torch
from collections import Counter
class Dictionary(object):
    """Bidirectional word <-> index vocabulary with occurrence counts."""

    def __init__(self):
        self.word2idx = {}       # word -> integer id
        self.idx2word = []       # id -> word (insertion order)
        self.counter = Counter() # id -> number of occurrences seen
        self.total = 0           # total tokens added (with repeats)

    def add_word(self, word):
        """Register *word* if new, bump its count, and return its id."""
        if word not in self.word2idx:
            self.word2idx[word] = len(self.idx2word)
            self.idx2word.append(word)
        token_id = self.word2idx[word]
        self.counter[token_id] += 1
        self.total += 1
        return token_id

    def __len__(self):
        return len(self.idx2word)
class Corpus(object):
    """Word-level LM corpus: train/valid/test splits as flat id tensors.

    Expects ``train.txt``/``valid.txt``/``test.txt`` under *path*; a single
    shared Dictionary is built across all three splits.
    """

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))

    def tokenize(self, path):
        """Tokenizes a text file."""
        assert os.path.exists(path)
        # Add words to the dictionary (first pass also counts tokens,
        # appending one '<eos>' marker per line).
        with open(path, 'r', encoding='utf-8') as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)

        # Tokenize file content (second pass fills the preallocated —
        # initially uninitialized — LongTensor with word ids).
        with open(path, 'r', encoding='utf-8') as f:
            ids = torch.LongTensor(tokens)
            token = 0
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1

        return ids
class SentCorpus(object):
    """Like Corpus, but keeps each line as its own LongTensor of word ids
    (sentence-level, for use with BatchSentLoader)."""

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))

    def tokenize(self, path):
        """Tokenizes a text file."""
        assert os.path.exists(path)
        # Add words to the dictionary (first pass; '<eos>' appended per line).
        with open(path, 'r', encoding='utf-8') as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)

        # Tokenize file content: one LongTensor per line.
        # NOTE(review): `if not line` only skips truly empty strings; lines
        # read from a file keep their '\n', so this rarely triggers.
        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                if not line:
                    continue
                words = line.split() + ['<eos>']
                sent = torch.LongTensor(len(words))
                for i, word in enumerate(words):
                    sent[i] = self.dictionary.word2idx[word]
                sents.append(sent)

        return sents
class BatchSentLoader(object):
    """Iterate over sentences in length-sorted minibatches.

    Sentences are sorted ascending by length; each batch is a
    (max_len, batch) LongTensor, columns padded with ``pad_id`` past each
    sentence's end. Optionally moves batches to GPU.
    """

    def __init__(self, sents, batch_size, pad_id=0, cuda=False, volatile=False):
        self.sents = sents
        self.batch_size = batch_size
        # Sorting keeps padding minimal within each batch.
        self.sort_sents = sorted(sents, key=lambda x: x.size(0))
        self.cuda = cuda
        self.volatile = volatile
        self.pad_id = pad_id

    def __next__(self):
        if self.idx >= len(self.sort_sents):
            raise StopIteration

        count = min(self.batch_size, len(self.sort_sents) - self.idx)
        batch = self.sort_sents[self.idx:self.idx + count]
        longest = max(s.size(0) for s in batch)

        out = torch.LongTensor(longest, count).fill_(self.pad_id)
        for col, sent in enumerate(batch):
            out[:sent.size(0), col].copy_(sent)

        if self.cuda:
            out = out.cuda()

        self.idx += count
        return out

    next = __next__  # Python 2 iterator compatibility

    def __iter__(self):
        self.idx = 0
        return self
if __name__ == '__main__':
    # Smoke test: load the PTB test split sentence-by-sentence and print the
    # padded shape of every minibatch of 10 sentences.
    corpus = SentCorpus('../penn')
    loader = BatchSentLoader(corpus.test, 10)

    for i, d in enumerate(loader):
        print(i, d.size())
| 4,005 | 30.054264 | 80 | py |
darts | darts-master/rnn/model_search.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from genotypes import PRIMITIVES, STEPS, CONCAT, Genotype
from torch.autograd import Variable
from collections import namedtuple
from model import DARTSCell, RNNModel
class DARTSCellSearch(DARTSCell):
    """DARTS cell used during architecture search.

    Instead of following one discrete genotype, every step evaluates a
    softmax-weighted mixture over all candidate (predecessor, activation)
    edges, weighted by the shared architecture parameters ``self.weights``.
    """

    def __init__(self, ninp, nhid, dropouth, dropoutx):
        super(DARTSCellSearch, self).__init__(ninp, nhid, dropouth, dropoutx,
                                              genotype=None)
        # Non-affine BN keeps mixed-state magnitudes comparable across edges.
        self.bn = nn.BatchNorm1d(nhid, affine=False)

    def cell(self, x, h_prev, x_mask, h_mask):
        s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
        s0 = self.bn(s0)
        # self.weights is attached by RNNModelSearch._initialize_arch_parameters.
        probs = F.softmax(self.weights, dim=-1)

        offset = 0
        states = s0.unsqueeze(0)
        for i in range(STEPS):
            if self.training:
                masked_states = states * h_mask.unsqueeze(0)
            else:
                masked_states = states
            # Gate/candidate pairs for all i+1 candidate predecessors at once.
            ch = masked_states.view(-1, self.nhid).mm(self._Ws[i]).view(i+1, -1, 2*self.nhid)
            c, h = torch.split(ch, self.nhid, dim=-1)
            c = c.sigmoid()

            s = torch.zeros_like(s0)
            for k, name in enumerate(PRIMITIVES):
                if name == 'none':
                    continue
                fn = self._get_activation(name)
                unweighted = states + c * (fn(h) - states)
                # rows offset..offset+i of probs hold the alphas for this step.
                s += torch.sum(probs[offset:offset+i+1, k].unsqueeze(-1).unsqueeze(-1) * unweighted, dim=0)
            s = self.bn(s)
            states = torch.cat([states, s.unsqueeze(0)], 0)
            offset += i+1
        # Output: mean of the last CONCAT intermediate states.
        output = torch.mean(states[-CONCAT:], dim=0)
        return output
class RNNModelSearch(RNNModel):
    """RNNModel variant that owns the DARTS architecture parameters (alphas)."""

    def __init__(self, *args):
        super(RNNModelSearch, self).__init__(*args,
                                             cell_cls=DARTSCellSearch,
                                             genotype=None)
        self._args = args
        self._initialize_arch_parameters()

    def new(self):
        # Fresh model with identical constructor args and copied alphas
        # (used by the architect for the unrolled/second-order step).
        model_new = RNNModelSearch(*self._args)
        for x, y in zip(model_new.arch_parameters(), self.arch_parameters()):
            x.data.copy_(y.data)
        return model_new

    def _initialize_arch_parameters(self):
        # One alpha row per (step, predecessor) edge, one column per primitive.
        k = sum(i for i in range(1, STEPS+1))
        weights_data = torch.randn(k, len(PRIMITIVES)).mul_(1e-3)
        self.weights = Variable(weights_data.cuda(), requires_grad=True)
        self._arch_parameters = [self.weights]
        # Every search cell shares the same alpha tensor.
        for rnn in self.rnns:
            rnn.weights = self.weights

    def arch_parameters(self):
        return self._arch_parameters

    def _loss(self, hidden, input, target):
        # NLL over the flattened (T*B, ntoken) log-probs; also returns the
        # updated hidden state so the caller can carry it forward.
        log_prob, hidden_next = self(input, hidden, return_h=False)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), target)
        return loss, hidden_next

    def genotype(self):
        """Derive the discrete genotype from the current (softmaxed) alphas."""

        def _parse(probs):
            gene = []
            start = 0
            for i in range(STEPS):
                end = start + i + 1
                W = probs[start:end].copy()
                # Predecessor whose best non-'none' op has the largest weight.
                j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[0]
                k_best = None
                for k in range(len(W[j])):
                    if k != PRIMITIVES.index('none'):
                        if k_best is None or W[j][k] > W[j][k_best]:
                            k_best = k
                gene.append((PRIMITIVES[k_best], j))
                start = end
            return gene

        gene = _parse(F.softmax(self.weights, dim=-1).data.cpu().numpy())
        genotype = Genotype(recurrent=gene, concat=range(STEPS+1)[-CONCAT:])
        return genotype
| 3,278 | 32.804124 | 125 | py |
darts | darts-master/rnn/train_search.py | import argparse
import os, sys, glob
import time
import math
import numpy as np
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from architect import Architect
import gc
import data
import model_search as model
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=300,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=300,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=300,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=50,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=3,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='EXP',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=5e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_wdecay', type=float, default=1e-3,
help='weight decay for the architecture encoding alpha')
parser.add_argument('--arch_lr', type=float, default=3e-3,
help='learning rate for the architecture encoding alpha')
args = parser.parse_args()
if args.nhidlast < 0:
args.nhidlast = args.emsize
if args.small_batch_size < 0:
args.small_batch_size = args.batch_size
if not args.continue_train:
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.enabled=True
torch.cuda.manual_seed_all(args.seed)
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
search_data = batchify(corpus.valid, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
ntokens = len(corpus.dictionary)
if args.continue_train:
model = torch.load(os.path.join(args.save, 'model.pt'))
else:
model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast,
args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute)
size = 0
for p in model.parameters():
size += p.nelement()
logging.info('param size: {}'.format(size))
logging.info('initial genotype:')
logging.info(model.genotype())
if args.cuda:
if args.single_gpu:
parallel_model = model.cuda()
else:
parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
parallel_model = model
architect = Architect(parallel_model, args)
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def evaluate(data_source, batch_size=10):
    """Average per-token NLL of the current model over *data_source*.

    Relies on module-level globals: ``model``, ``parallel_model``, ``args``
    and ``corpus``. The hidden state is carried across BPTT windows and
    detached between them.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        # Weight each window's loss by its length (last window may be short).
        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    # NOTE(review): total_loss[0] assumes legacy 1-element loss tensors
    # (pre-0.4 PyTorch); newer versions would need .item() — confirm the
    # targeted torch version.
    return total_loss[0] / len(data_source)
def train():
    """One epoch of joint weight/architecture search training.

    For every minibatch: (1) the Architect updates the architecture alphas
    on a validation batch, then (2) a regular SGD step updates the model
    weights on the training batch. Uses module-level globals (``model``,
    ``parallel_model``, ``architect``, ``optimizer``, ``args``, ``corpus``,
    ``train_data``, ``search_data``, ``epoch``).
    """
    assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'

    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    # One hidden state per small-batch slice, for train and validation streams.
    hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        # Occasionally halve the window length (same trick as AWD-LSTM).
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        # seq_len = max(5, int(np.random.normal(bptt, 5)))
        # # There's a very small chance that it could select a very long sequence length resulting in OOM
        # seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)
        seq_len = int(bptt)

        # Scale the LR in proportion to the (possibly shortened) window.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()

        # Validation batch for the architecture step (wraps around search_data).
        data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args)
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)

        optimizer.zero_grad()

        start, end, s_id = 0, args.small_batch_size, 0
        while start < args.batch_size:
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
            cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1)

            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])
            hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id])

            # Architecture (alpha) update on the validation batch.
            hidden_valid[s_id], grad_norm = architect.step(
                    hidden[s_id], cur_data, cur_targets,
                    hidden_valid[s_id], cur_data_valid, cur_targets_valid,
                    optimizer,
                    args.unrolled)

            # assuming small_batch_size = batch_size so we don't accumulate gradients
            optimizer.zero_grad()
            hidden[s_id] = repackage_hidden(hidden[s_id])

            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)

            loss = raw_loss
            # Activiation Regularization
            if args.alpha > 0:
                loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization (slowness)
            loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            # Scale so the accumulated gradient matches a full-batch step.
            loss *= args.small_batch_size / args.batch_size
            total_loss += raw_loss.data * args.small_batch_size / args.batch_size
            loss.backward()

            s_id += 1
            start = end
            end = start + args.small_batch_size

            gc.collect()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
        torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()

        # total_loss += raw_loss.data
        optimizer.param_groups[0]['lr'] = lr2
        if batch % args.log_interval == 0 and batch > 0:
            logging.info(parallel_model.genotype())
            print(F.softmax(parallel_model.weights, dim=-1))
            # NOTE(review): total_loss[0] assumes legacy 1-element loss
            # tensors (pre-0.4 PyTorch); newer versions would need .item().
            cur_loss = total_loss[0] / args.log_interval
            elapsed = time.time() - start_time
            logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
        batch += 1
        i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000
if args.continue_train:
optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
if 't0' in optimizer_state['param_groups'][0]:
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
optimizer.load_state_dict(optimizer_state)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data, eval_batch_size)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
if val_loss < stored_loss:
save_checkpoint(model, optimizer, epoch, args.save)
logging.info('Saving Normal!')
stored_loss = val_loss
best_val_loss.append(val_loss)
| 12,639 | 43.041812 | 132 | py |
darts | darts-master/rnn/train.py | import os
import gc
import sys
import glob
import time
import math
import numpy as np
import torch
import torch.nn as nn
import logging
import argparse
import genotypes
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import data
import model
from torch.autograd import Variable
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')
parser.add_argument('--data', type=str, default='../data/penn/',
help='location of the data corpus')
parser.add_argument('--emsize', type=int, default=850,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=850,
help='number of hidden units per layer')
parser.add_argument('--nhidlast', type=int, default=850,
help='number of hidden units for the last rnn layer')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8000,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.75,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.25,
help='dropout for hidden nodes in rnn layers (0 = no dropout)')
parser.add_argument('--dropoutx', type=float, default=0.75,
help='dropout for input nodes rnn layers (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.2,
help='dropout for input embedding layers (0 = no dropout)')
parser.add_argument('--dropoute', type=float, default=0.1,
help='dropout to remove words from embedding layer (0 = no dropout)')
parser.add_argument('--seed', type=int, default=1267,
help='random seed')
parser.add_argument('--nonmono', type=int, default=5,
help='random seed')
parser.add_argument('--cuda', action='store_false',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='EXP',
help='path to save the final model')
parser.add_argument('--alpha', type=float, default=0,
help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')
parser.add_argument('--beta', type=float, default=1e-3,
help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')
parser.add_argument('--wdecay', type=float, default=8e-7,
help='weight decay applied to all weights')
parser.add_argument('--continue_train', action='store_true',
help='continue train from a checkpoint')
parser.add_argument('--small_batch_size', type=int, default=-1,
help='the batch size for computation. batch_size should be divisible by small_batch_size.\
In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\
until batch_size is reached. An update step is then performed.')
parser.add_argument('--max_seq_len_delta', type=int, default=20,
help='max sequence length')
parser.add_argument('--single_gpu', default=True, action='store_false',
help='use single GPU')
parser.add_argument('--gpu', type=int, default=0, help='GPU device to use')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()
if args.nhidlast < 0:
args.nhidlast = args.emsize
if args.small_batch_size < 0:
args.small_batch_size = args.batch_size
if not args.continue_train:
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
cudnn.enabled=True
torch.cuda.manual_seed_all(args.seed)
corpus = data.Corpus(args.data)
eval_batch_size = 10
test_batch_size = 1
train_data = batchify(corpus.train, args.batch_size, args)
val_data = batchify(corpus.valid, eval_batch_size, args)
test_data = batchify(corpus.test, test_batch_size, args)
ntokens = len(corpus.dictionary)
if args.continue_train:
model = torch.load(os.path.join(args.save, 'model.pt'))
else:
genotype = eval("genotypes.%s" % args.arch)
model = model.RNNModel(ntokens, args.emsize, args.nhid, args.nhidlast,
args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute,
cell_cls=model.DARTSCell, genotype=genotype)
if args.cuda:
if args.single_gpu:
parallel_model = model.cuda()
else:
parallel_model = nn.DataParallel(model, dim=1).cuda()
else:
parallel_model = model
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
logging.info('Genotype: {}'.format(genotype))
def evaluate(data_source, batch_size=10):
    """Average per-token NLL of the current model over *data_source*.

    Relies on module-level globals: ``model``, ``parallel_model``, ``args``
    and ``corpus``. The hidden state is carried across BPTT windows and
    detached between them.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, args.bptt):
        data, targets = get_batch(data_source, i, args, evaluation=True)
        targets = targets.view(-1)

        log_prob, hidden = parallel_model(data, hidden)
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data

        # Weight each window's loss by its length (last window may be short).
        total_loss += loss * len(data)

        hidden = repackage_hidden(hidden)
    # NOTE(review): total_loss[0] assumes legacy 1-element loss tensors
    # (pre-0.4 PyTorch); newer versions would need .item() — confirm the
    # targeted torch version.
    return total_loss[0] / len(data_source)
def train():
    """One epoch of language-model training on a fixed (derived) architecture.

    AWD-LSTM-style loop: random BPTT window length, LR scaled by window
    length, activation/temporal regularization, gradient clipping. Uses
    module-level globals (``model``, ``parallel_model``, ``optimizer``,
    ``args``, ``corpus``, ``train_data``, ``epoch``).
    """
    assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by small_batch_size'

    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    # One hidden state per small-batch slice of the full batch.
    hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        # Occasionally halve the window length (same trick as AWD-LSTM).
        bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.
        # Prevent excessively small or negative sequence lengths
        seq_len = max(5, int(np.random.normal(bptt, 5)))
        # There's a very small chance that it could select a very long sequence length resulting in OOM
        seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)

        # Scale the LR in proportion to the sampled window length.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt
        model.train()
        data, targets = get_batch(train_data, i, args, seq_len=seq_len)

        optimizer.zero_grad()

        start, end, s_id = 0, args.small_batch_size, 0
        while start < args.batch_size:
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)

            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])

            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)

            loss = raw_loss
            # Activiation Regularization
            if args.alpha > 0:
                loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal Activation Regularization (slowness)
            loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            # Scale so the accumulated gradient matches a full-batch step.
            loss *= args.small_batch_size / args.batch_size
            total_loss += raw_loss.data * args.small_batch_size / args.batch_size
            loss.backward()

            s_id += 1
            start = end
            end = start + args.small_batch_size

            gc.collect()

        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.
        torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()

        # total_loss += raw_loss.data
        optimizer.param_groups[0]['lr'] = lr2
        # NOTE(review): total_loss[0] assumes legacy 1-element loss tensors
        # (pre-0.4 PyTorch); newer versions would need .item().
        if np.isnan(total_loss[0]):
            raise
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss[0] / args.log_interval
            elapsed = time.time() - start_time
            logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()
        batch += 1
        i += seq_len
# Loop over epochs.
lr = args.lr
best_val_loss = []
stored_loss = 100000000
# At any point you can hit Ctrl + C to break out of training early.
try:
if args.continue_train:
optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
if 't0' in optimizer_state['param_groups'][0]:
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
optimizer.load_state_dict(optimizer_state)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
epoch = 1
while epoch < args.epochs + 1:
epoch_start_time = time.time()
try:
train()
except:
logging.info('rolling back to the previous best model ...')
model = torch.load(os.path.join(args.save, 'model.pt'))
parallel_model = model.cuda()
optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))
if 't0' in optimizer_state['param_groups'][0]:
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
optimizer.load_state_dict(optimizer_state)
epoch = torch.load(os.path.join(args.save, 'misc.pt'))['epoch']
continue
if 't0' in optimizer.param_groups[0]:
tmp = {}
for prm in model.parameters():
tmp[prm] = prm.data.clone()
prm.data = optimizer.state[prm]['ax'].clone()
val_loss2 = evaluate(val_data)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss2, math.exp(val_loss2)))
logging.info('-' * 89)
if val_loss2 < stored_loss:
save_checkpoint(model, optimizer, epoch, args.save)
logging.info('Saving Averaged!')
stored_loss = val_loss2
for prm in model.parameters():
prm.data = tmp[prm].clone()
else:
val_loss = evaluate(val_data, eval_batch_size)
logging.info('-' * 89)
logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
logging.info('-' * 89)
if val_loss < stored_loss:
save_checkpoint(model, optimizer, epoch, args.save)
logging.info('Saving Normal!')
stored_loss = val_loss
if 't0' not in optimizer.param_groups[0] and (len(best_val_loss)>args.nonmono and val_loss > min(best_val_loss[:-args.nonmono])):
logging.info('Switching!')
optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
best_val_loss.append(val_loss)
epoch += 1
except KeyboardInterrupt:
logging.info('-' * 89)
logging.info('Exiting from training early')
# Load the best saved model.
model = torch.load(os.path.join(args.save, 'model.pt'))
parallel_model = model.cuda()
# Run on test data.
test_loss = evaluate(test_data, test_batch_size)
logging.info('=' * 89)
logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
logging.info('=' * 89)
| 13,900 | 42.037152 | 141 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#!/usr/bin/env python
import glob
import os
import torch
from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
requirements = ["torch", "torchvision"]
def get_extensions():
    """Collect the C++/CUDA sources and build the torch extension list.

    Returns a single-element list containing the ``maskrcnn_benchmark._C``
    extension.  A CUDA build is produced only when both a CUDA toolkit
    (``CUDA_HOME``) and a visible device are available; otherwise the
    CPU-only sources are compiled with ``CppExtension``.
    """
    root = os.path.dirname(os.path.abspath(__file__))
    csrc = os.path.join(root, "maskrcnn_benchmark", "csrc")

    cpp_main = glob.glob(os.path.join(csrc, "*.cpp"))
    cpp_cpu = glob.glob(os.path.join(csrc, "cpu", "*.cpp"))
    cu_cuda = glob.glob(os.path.join(csrc, "cuda", "*.cu"))

    sources = cpp_main + cpp_cpu
    ext_cls = CppExtension
    compile_args = {"cxx": []}
    macros = []

    if torch.cuda.is_available() and CUDA_HOME is not None:
        ext_cls = CUDAExtension
        sources = sources + cu_cuda
        macros = macros + [("WITH_CUDA", None)]
        # Disable half-precision operator shortcuts in the CUDA kernels.
        compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]

    sources = [os.path.join(csrc, path) for path in sources]

    return [
        ext_cls(
            "maskrcnn_benchmark._C",
            sources,
            include_dirs=[csrc],
            define_macros=macros,
            extra_compile_args=compile_args,
        )
    ]
# Register the package and compile the custom C++/CUDA ops as the
# `maskrcnn_benchmark._C` module via torch's cpp-extension build machinery.
setup(
    name="maskrcnn_benchmark",
    version="0.1",
    author="fmassa",
    url="https://github.com/facebookresearch/maskrnn-benchmark",
    description="object detection in pytorch",
    packages=find_packages(exclude=("configs", "examples", "test",)),
    # install_requires=requirements,
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 2,068 | 28.140845 | 73 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/tools/test_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.text_inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logging import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
# Check if we can enable mixed-precision via apex.amp
try:
from apex import amp
except ImportError:
raise ImportError('Use APEX for mixed precision via apex.amp')
def main():
    """Inference entry point.

    Parses the command line, (optionally) initializes distributed NCCL
    inference, builds the model, loads the checkpoint named by
    ``cfg.MODEL.WEIGHT``, and runs ``inference`` on every configured test
    dataset, writing results under ``cfg.OUTPUT_DIR/inference/<dataset>``.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default="./configs/seq.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    # Distributed mode is enabled when the launcher exports WORLD_SIZE.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        # BUGFIX: the `torch.distributed.deprecated` namespace has been
        # removed from PyTorch; use the supported API (this is also the
        # call train_net.py in this repo already makes).
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    save_dir = ""
    logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)
    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)

    # Initialize mixed-precision if necessary (apex amp; side-effectful).
    use_mixed_precision = cfg.DTYPE == 'float16'
    amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)

    # Load the weights to evaluate.
    checkpointer = DetectronCheckpointer(cfg, model)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)

    # One output folder per test dataset.
    output_folders = [None] * len(cfg.DATASETS.TEST)
    if cfg.OUTPUT_DIR:
        dataset_names = cfg.DATASETS.TEST
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder

    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    model_name = cfg.MODEL.WEIGHT.split('/')[-1]
    for output_folder, data_loader_val in zip(output_folders, data_loaders_val):
        inference(
            model,
            data_loader_val,
            iou_types=iou_types,
            box_only=cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
            model_name=model_name,
            cfg=cfg,
        )
        # Keep ranks in lockstep between datasets.
        synchronize()
if __name__ == "__main__":
main()
| 3,686 | 34.451923 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/tools/demo.py | import os
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.utils.chars import getstr_grid, get_tight_rect
from PIL import Image
import numpy as np
import argparse
class TextDemo(object):
    """End-to-end text-spotting demo wrapper.

    Builds the detection model from `cfg`, loads the weights referenced by
    `cfg.MODEL.WEIGHT`, and exposes `run_on_opencv_image`, which returns the
    detected text polygons together with the recognized words for one image.
    """
    def __init__(
        self,
        cfg,
        confidence_threshold=0.7,
        min_image_size=224,
        output_polygon=True
    ):
        self.cfg = cfg.clone()
        self.model = build_detection_model(cfg)
        self.model.eval()
        self.device = torch.device(cfg.MODEL.DEVICE)
        self.model.to(self.device)
        self.min_image_size = min_image_size
        # Load trained weights into the freshly built model.
        checkpointer = DetectronCheckpointer(cfg, self.model)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)
        self.transforms = self.build_transform()
        self.cpu_device = torch.device("cpu")
        self.confidence_threshold = confidence_threshold
        # True -> tight polygon from mask contours; False -> min-area rotated box.
        self.output_polygon = output_polygon
    def build_transform(self):
        """
        Creates a basic transformation that was used to train the models
        """
        cfg = self.cfg
        # we are loading images with OpenCV, so we don't need to convert them
        # to BGR, they are already! So all we need to do is to normalize
        # by 255 if we want to convert to BGR255 format, or flip the channels
        # if we want it to be in RGB in [0-1] range.
        if cfg.INPUT.TO_BGR255:
            to_bgr_transform = T.Lambda(lambda x: x * 255)
        else:
            to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
        normalize_transform = T.Normalize(
            mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
        )
        transform = T.Compose(
            [
                T.ToPILImage(),
                T.Resize(self.min_image_size),
                T.ToTensor(),
                to_bgr_transform,
                normalize_transform,
            ]
        )
        return transform
    def run_on_opencv_image(self, image):
        """
        Arguments:
            image (np.ndarray): an image as returned by OpenCV
        Returns:
            result_polygons (list): detection results
            result_words (list): recognition results
        """
        result_polygons, result_words = self.compute_prediction(image)
        return result_polygons, result_words
    def compute_prediction(self, original_image):
        """Run the model on one image and post-process detections and words."""
        # apply pre-processing to image
        image = self.transforms(original_image)
        # convert to an ImageList, padded so that it is divisible by
        # cfg.DATALOADER.SIZE_DIVISIBILITY
        image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
        image_list = image_list.to(self.device)
        # compute predictions
        with torch.no_grad():
            predictions, _, _ = self.model(image_list)
        global_predictions = predictions[0]
        char_predictions = predictions[1]
        char_mask = char_predictions['char_mask']
        char_boxes = char_predictions['boxes']
        # Character-mask recognition head output.
        words, rec_scores = self.process_char_mask(char_mask, char_boxes)
        # Sequence-decoder recognition head output.
        seq_words = char_predictions['seq_outputs']
        seq_scores = char_predictions['seq_scores']
        global_predictions = [o.to(self.cpu_device) for o in global_predictions]
        # always single image is passed at a time
        global_prediction = global_predictions[0]
        # reshape prediction (a BoxList) into the original image size
        height, width = original_image.shape[:-1]
        global_prediction = global_prediction.resize((width, height))
        boxes = global_prediction.bbox.tolist()
        scores = global_prediction.get_field("scores").tolist()
        masks = global_prediction.get_field("mask").cpu().numpy()
        result_polygons = []
        result_words = []
        for k, box in enumerate(boxes):
            score = scores[k]
            if score < self.confidence_threshold:
                continue
            box = list(map(int, box))
            mask = masks[k,0,:,:]
            polygon = self.mask2polygon(mask, box, original_image.shape, threshold=0.5, output_polygon=self.output_polygon)
            if polygon is None:
                # Fall back to the axis-aligned detection box.
                polygon = [box[0], box[1], box[2], box[1], box[2], box[3], box[0], box[3]]
            result_polygons.append(polygon)
            word = words[k]
            rec_score = rec_scores[k]
            seq_word = seq_words[k]
            seq_char_scores = seq_scores[k]
            seq_score = sum(seq_char_scores) / float(len(seq_char_scores))
            # Keep whichever recognition head scored higher on average.
            if seq_score > rec_score:
                result_words.append(seq_word)
            else:
                result_words.append(word)
        return result_polygons, result_words
    def process_char_mask(self, char_masks, boxes, threshold=192):
        """Decode per-box character masks into strings and scores.

        `threshold` is forwarded to `getstr_grid`; the default of 192
        suggests the maps are in 0-255 range — TODO confirm against
        getstr_grid.
        """
        texts, rec_scores = [], []
        for index in range(char_masks.shape[0]):
            box = list(boxes[index])
            box = list(map(int, box))
            text, rec_score, _, _ = getstr_grid(char_masks[index,:,:,:].copy(), box, threshold=threshold)
            texts.append(text)
            rec_scores.append(rec_score)
        return texts, rec_scores
    def mask2polygon(self, mask, box, im_size, threshold=0.5, output_polygon=True):
        """Convert a box-local soft mask into a polygon in image coordinates.

        Returns a flat [x1, y1, x2, y2, ...] list of ints, or None when no
        usable contour/hull is found.  `box` positions the mask within the
        original image of shape `im_size`.
        """
        # mask 32*128
        image_width, image_height = im_size[1], im_size[0]
        box_h = box[3] - box[1]
        box_w = box[2] - box[0]
        cls_polys = (mask*255).astype(np.uint8)
        poly_map = np.array(Image.fromarray(cls_polys).resize((box_w, box_h)))
        poly_map = poly_map.astype(np.float32) / 255
        poly_map=cv2.GaussianBlur(poly_map,(3,3),sigmaX=3)
        ret, poly_map = cv2.threshold(poly_map,0.5,1,cv2.THRESH_BINARY)
        if output_polygon:
            # Clean the binary map, then approximate the largest contour.
            SE1=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
            poly_map = cv2.erode(poly_map,SE1)
            poly_map = cv2.dilate(poly_map,SE1);
            poly_map = cv2.morphologyEx(poly_map,cv2.MORPH_CLOSE,SE1)
            # findContours' return arity differs across OpenCV versions.
            try:
                _, contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            except:
                contours, _ = cv2.findContours((poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
            if len(contours)==0:
                print(contours)
                print(len(contours))
                return None
            max_area=0
            max_cnt = contours[0]
            for cnt in contours:
                area=cv2.contourArea(cnt)
                if area > max_area:
                    max_area = area
                    max_cnt = cnt
            perimeter = cv2.arcLength(max_cnt,True)
            epsilon = 0.01*cv2.arcLength(max_cnt,True)
            approx = cv2.approxPolyDP(max_cnt,epsilon,True)
            pts = approx.reshape((-1,2))
            # Shift from box-local to image coordinates.
            pts[:,0] = pts[:,0] + box[0]
            pts[:,1] = pts[:,1] + box[1]
            polygon = list(pts.reshape((-1,)))
            polygon = list(map(int, polygon))
            if len(polygon)<6:
                return None
        else:
            # Rotated-box output: convex hull of mask pixels -> min-area rect.
            SE1=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
            poly_map = cv2.erode(poly_map,SE1)
            poly_map = cv2.dilate(poly_map,SE1);
            poly_map = cv2.morphologyEx(poly_map,cv2.MORPH_CLOSE,SE1)
            idy,idx=np.where(poly_map == 1)
            xy=np.vstack((idx,idy))
            xy=np.transpose(xy)
            hull = cv2.convexHull(xy, clockwise=True)
            #reverse order of points.
            if hull is None:
                return None
            hull=hull[::-1]
            #find minimum area bounding box.
            rect = cv2.minAreaRect(hull)
            corners = cv2.boxPoints(rect)
            corners = np.array(corners, dtype="int")
            pts = get_tight_rect(corners, box[0], box[1], image_height, image_width, 1)
            polygon = [x * 1.0 for x in pts]
            polygon = list(map(int, polygon))
        return polygon
    def visualization(self, image, polygons, words):
        """Draw each polygon and its word onto `image` in red, in place."""
        for polygon, word in zip(polygons, words):
            pts = np.array(polygon, np.int32)
            pts = pts.reshape((-1,1,2))
            xmin = min(pts[:,0,0])
            ymin = min(pts[:,0,1])
            cv2.polylines(image,[pts],True,(0,0,255))
            cv2.putText(image, word, (xmin, ymin), cv2.FONT_HERSHEY_COMPLEX, 1, (0,0,255), 2)
def main(args):
    """Detect and recognize text in one image, then save a visualization."""
    # Load the experiment configuration from disk.
    cfg.merge_from_file(args.config_file)
    # To force CPU inference, uncomment the next line:
    # cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
    spotter = TextDemo(
        cfg,
        min_image_size=800,
        confidence_threshold=0.7,
        output_polygon=True
    )
    # Run prediction on the input image and draw the results onto it.
    img = cv2.imread(args.image_path)
    polygons, words = spotter.run_on_opencv_image(img)
    spotter.visualization(img, polygons, words)
    cv2.imwrite(args.visu_path, img)
if __name__ == "__main__":
    # CLI: config file, input image path, and output visualization path.
    parser = argparse.ArgumentParser(description='parameters for demo')
    parser.add_argument("--config-file", type=str, default='configs/mixtrain/seg_rec_poly_fuse_feature.yaml')
    parser.add_argument("--image_path", type=str, default='./demo_images/demo.jpg')
    parser.add_argument("--visu_path", type=str, default='./demo_images/demo_results.jpg')
    args = parser.parse_args()
    main(args)
MaskTextSpotterV3 | MaskTextSpotterV3-master/tools/train_net.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""
Basic training script for PyTorch
"""
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logging import setup_logger, Logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
# See if we can use apex.DistributedDataParallel instead of the torch default,
# and enable mixed-precision via apex.amp
try:
from apex import amp
except ImportError:
raise ImportError('Use APEX for multi-precision via apex.amp')
def train(cfg, local_rank, distributed):
    """Build the model/optimizer/scheduler from `cfg` and run training.

    Initializes apex amp mixed precision, optionally wraps the model in
    DistributedDataParallel, restores a checkpoint when SOLVER.RESUME is
    set, and delegates the loop to `do_train`.  Returns the trained model.
    """
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    # Initialize mixed-precision training
    use_mixed_precision = cfg.DTYPE == "float16"
    amp_opt_level = 'O1' if use_mixed_precision else 'O0'
    model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank,
            # this should be removed if we update BatchNorm stats
            broadcast_buffers=False,
            # find_unused_parameters=True
        )
    arguments = {}
    arguments["iteration"] = 0
    output_dir = cfg.OUTPUT_DIR
    # Only rank 0 writes checkpoints.
    save_to_disk = get_rank() == 0
    checkpointer = DetectronCheckpointer(
        cfg, model, optimizer, scheduler, output_dir, save_to_disk
    )
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.SOLVER.RESUME)
    if cfg.SOLVER.RESUME:
        # Restore the iteration counter (and any other saved state).
        arguments.update(extra_checkpoint_data)
    data_loader = make_data_loader(
        cfg,
        is_train=True,
        is_distributed=distributed,
        start_iter=arguments["iteration"],
    )
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    tb_logger = Logger(cfg.OUTPUT_DIR, local_rank)
    do_train(
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        device,
        checkpoint_period,
        arguments,
        tb_logger,
        cfg,
        local_rank,
    )
    return model
def main():
    """Training entry point: parse args, set up distributed/logging, train."""
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)
    # NOTE(review): --skip-test is parsed but never read in this file —
    # confirm whether final-model evaluation was meant to be invoked here.
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # Distributed mode is enabled when the launcher exports WORLD_SIZE.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)
    local_rank = get_rank()
    logger = setup_logger("maskrcnn_benchmark", output_dir, local_rank)
    # Only rank 0 logs the environment and full configuration.
    if local_rank == 0:
        logger.info("Using {} GPUs".format(num_gpus))
        logger.info(args)
        logger.info("Collecting env info (might take some time)")
        logger.info("\n" + collect_env_info())
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, "r") as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
        logger.info("Running with config:\n{}".format(cfg))
    model = train(cfg, args.local_rank, args.distributed)
if __name__ == "__main__":
main()
| 4,818 | 30.292208 | 89 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/solver/lr_scheduler.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from bisect import bisect_right
import torch
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Step-decay learning-rate schedule with an initial warmup phase.

    For the first ``warmup_iters`` iterations the base LR is scaled by a
    warmup factor ("constant", or annealed "linear"ly from
    ``warmup_factor`` up to 1).  After warmup the schedule is either:

    * multi-step decay — LR multiplied by ``gamma`` once per milestone
      passed (the default), or
    * polynomial decay over ``max_iter`` with power ``lr_pow`` when
      ``pow_schedule_mode`` is True (milestones/gamma are then ignored).
    """

    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        last_epoch=-1,
        pow_schedule_mode = False,
        max_iter = 300000,
        lr_pow = 0.9
    ):
        if not list(milestones) == sorted(milestones):
            # BUGFIX: format the message — previously the template and the
            # value were passed as two separate ValueError arguments.
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            # BUGFIX: the two string literals used to concatenate into
            # "acceptedgot {}"; add the missing separator.
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        self.pow_schedule_mode = pow_schedule_mode
        self.max_iter = max_iter
        self.lr_pow = lr_pow
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the learning rate of each param group at ``last_epoch``."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Anneal linearly from warmup_factor (iter 0) to 1.
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        if self.pow_schedule_mode:
            # Polynomial decay towards 0 at max_iter.
            scale_running_lr = ((1. - float(self.last_epoch) / self.max_iter) ** self.lr_pow)
            return [
                base_lr * warmup_factor * scale_running_lr
                for base_lr in self.base_lrs
            ]
        else:
            # Multiply by gamma for every milestone already passed.
            return [
                base_lr
                * warmup_factor
                * self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs
            ]
| 2,292 | 33.742424 | 93 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/solver/build.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .lr_scheduler import WarmupMultiStepLR
def make_optimizer(cfg, model):
    """Build the training optimizer from the solver config.

    Every trainable parameter gets its own param group so that biases can
    use a scaled learning rate (SOLVER.BIAS_LR_FACTOR) and their own weight
    decay (SOLVER.WEIGHT_DECAY_BIAS).  Returns Adam when SOLVER.USE_ADAM is
    set, otherwise SGD with SOLVER.MOMENTUM.
    """
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if "bias" in key:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    if cfg.SOLVER.USE_ADAM:
        # BUGFIX: Adam was previously built on `model.parameters()` with the
        # loop-local `lr` (whatever the *last* parameter's rate happened to
        # be, and an UnboundLocalError if no parameter required grad).  Use
        # the same per-parameter groups as SGD, defaulting to the base LR.
        optimizer = torch.optim.Adam(params, cfg.SOLVER.BASE_LR)
    else:
        optimizer = torch.optim.SGD(
            params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM
        )
    return optimizer
def make_lr_scheduler(cfg, optimizer):
    """Create the warmup multi-step LR scheduler described by the config."""
    schedule_kwargs = dict(
        gamma=cfg.SOLVER.GAMMA,
        warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
        warmup_iters=cfg.SOLVER.WARMUP_ITERS,
        warmup_method=cfg.SOLVER.WARMUP_METHOD,
        pow_schedule_mode=cfg.SOLVER.POW_SCHEDULE,
        max_iter=cfg.SOLVER.MAX_ITER,
    )
    return WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, **schedule_kwargs)
| 1,176 | 29.973684 | 79 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/config/paths_catalog.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Centralized catalog of paths."""
import os
class DatasetCatalog(object):
    """Static catalog mapping dataset names to their on-disk locations.

    Each DATASETS entry is a tuple of paths relative to DATA_DIR: an image
    directory plus, optionally, a ground-truth directory or annotation
    file.  `get` resolves a name into the dataset factory class name and
    the constructor kwargs used by the data-loading code.
    """
    DATA_DIR = "datasets"
    # DATA_DIR = "/share/mhliao/MaskTextSpotterV3/datasets/"
    DATASETS = {
        "coco_2014_train": (
            "coco/train2014",
            "coco/annotations/instances_train2014.json",
        ),
        "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
        "coco_2014_minival": (
            "coco/val2014",
            "coco/annotations/instances_minival2014.json",
        ),
        "coco_2014_valminusminival": (
            "coco/val2014",
            "coco/annotations/instances_valminusminival2014.json",
        ),
        "icdar_2013_train": ("icdar2013/train_images", "icdar2013/train_gts"),
        "icdar_2013_test": ("icdar2013/test_images", "icdar2013/test_gts"),
        "rotated_ic13_test_0": ("icdar2013/rotated_test_images_0", "icdar2013/rotated_test_gts_0"),
        "rotated_ic13_test_15": ("icdar2013/rotated_test_images_15", "icdar2013/rotated_test_gts_15"),
        "rotated_ic13_test_30": ("icdar2013/rotated_test_images_30", "icdar2013/rotated_test_gts_30"),
        "rotated_ic13_test_45": ("icdar2013/rotated_test_images_45", "icdar2013/rotated_test_gts_45"),
        "rotated_ic13_test_60": ("icdar2013/rotated_test_images_60", "icdar2013/rotated_test_gts_60"),
        "rotated_ic13_test_75": ("icdar2013/rotated_test_images_75", "icdar2013/rotated_test_gts_75"),
        "rotated_ic13_test_85": ("icdar2013/rotated_test_images_85", "icdar2013/rotated_test_gts_85"),
        "rotated_ic13_test_90": ("icdar2013/rotated_test_images_90", "icdar2013/rotated_test_gts_90"),
        "rotated_ic13_test_-15": ("icdar2013/rotated_test_images_-15", "icdar2013/rotated_test_gts_-15"),
        "rotated_ic13_test_-30": ("icdar2013/rotated_test_images_-30", "icdar2013/rotated_test_gts_-30"),
        "rotated_ic13_test_-45": ("icdar2013/rotated_test_images_-45", "icdar2013/rotated_test_gts_-45"),
        "rotated_ic13_test_-60": ("icdar2013/rotated_test_images_-60", "icdar2013/rotated_test_gts_-60"),
        "rotated_ic13_test_-75": ("icdar2013/rotated_test_images_-75", "icdar2013/rotated_test_gts_-75"),
        "rotated_ic13_test_-90": ("icdar2013/rotated_test_images_-90", "icdar2013/rotated_test_gts_-90"),
        "icdar_2015_train": ("icdar2015/train_images", "icdar2015/train_gts"),
        "icdar_2015_test": (
            "icdar2015/test_images",
            # "icdar2015/test_gts",
        ),
        "synthtext_train": ("synthtext/train_images", "synthtext/train_gts"),
        "synthtext_test": ("synthtext/test_images", "synthtext/test_gts"),
        "total_text_train": ("total_text/train_images", "total_text/train_gts"),
        "td500_train": ("TD_TR/TD500/train_images", "TD500/train_gts"),
        "td500_test": ("TD_TR/TD500/test_images", ),
        "tr400_train": ("TD_TR/TR400/train_images", "TR400/train_gts"),
        "total_text_test": (
            "total_text/test_images",
            # "total_text/test_gts",
        ),
        "scut-eng-char_train": (
            "scut-eng-char/train_images",
            "scut-eng-char/train_gts",
        ),
    }
    @staticmethod
    def get(name):
        """Return dict(factory=..., args=...) for the named dataset.

        Matching is substring-based, so branch order matters.  Raises
        RuntimeError for unknown names.
        """
        if "coco" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                root=os.path.join(data_dir, attrs[0]),
                ann_file=os.path.join(data_dir, attrs[1]),
            )
            return dict(factory="COCODataset", args=args)
        elif "icdar_2013" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                use_charann=True,
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=os.path.join(data_dir, attrs[1]),
                # imgs_dir='/tmp/icdar2013/icdar2013/train_images',
                # gts_dir='/tmp/icdar2013/icdar2013/train_gts',
            )
            return dict(args=args, factory="IcdarDataset")
        elif "rotated_ic13" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                use_charann=True,
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=os.path.join(data_dir, attrs[1]),
            )
            return dict(args=args, factory="IcdarDataset")
        elif "icdar_2015" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            # Test split has no ground truth directory.
            if len(attrs) > 1:
                gts_dir = os.path.join(data_dir, attrs[1])
            else:
                gts_dir = None
            args = dict(
                use_charann=False,
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=gts_dir,
                # imgs_dir='/tmp/icdar2015/icdar2015/train_images/',
                # gts_dir='/tmp/icdar2015/icdar2015/train_gts/',
            )
            return dict(args=args, factory="IcdarDataset")
        elif "synthtext" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                use_charann=True,
                list_file_path=os.path.join(data_dir, "synthtext/train_list.txt"),
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=os.path.join(data_dir, attrs[1]),
                # imgs_dir='/tmp/synth/SynthText/',
                # gts_dir='/tmp/synth_gt/SynthText_GT_E2E/',
            )
            return dict(args=args, factory="SynthtextDataset")
        elif "total_text" in name:
            data_dir = DatasetCatalog.DATA_DIR
            # data_dir = '/tmp/total_text/'
            attrs = DatasetCatalog.DATASETS[name]
            if len(attrs) > 1:
                gts_dir = os.path.join(data_dir, attrs[1])
            else:
                gts_dir = None
            args = dict(
                use_charann=False,
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=gts_dir,
                # imgs_dir='/tmp/total_text/total_text/train_images/',
                # gts_dir='/tmp/total_text/total_text/train_gts/',
            )
            return dict(args=args, factory="TotaltextDataset")
        elif "scut-eng-char" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            args = dict(
                use_charann=True,
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=os.path.join(data_dir, attrs[1]),
                # imgs_dir='/tmp/scut-eng-char/scut-eng-char/train_images/',
                # gts_dir='/tmp/scut-eng-char/scut-eng-char/train_gts/',
            )
            return dict(args=args, factory="ScutDataset")
        elif "td500" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            if len(attrs) > 1:
                gts_dir = os.path.join(data_dir, attrs[1])
            else:
                gts_dir = None
            args = dict(
                use_charann=False,
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=gts_dir,
            )
            return dict(args=args, factory="TotaltextDataset")
        elif "tr400" in name:
            data_dir = DatasetCatalog.DATA_DIR
            attrs = DatasetCatalog.DATASETS[name]
            if len(attrs) > 1:
                gts_dir = os.path.join(data_dir, attrs[1])
            else:
                gts_dir = None
            args = dict(
                use_charann=False,
                imgs_dir=os.path.join(data_dir, attrs[0]),
                gts_dir=gts_dir,
            )
            return dict(args=args, factory="TotaltextDataset")
        raise RuntimeError("Dataset not available: {}".format(name))
class ModelCatalog(object):
    """Resolve symbolic pretrained-model names to download URLs.

    Handles two families: "ImageNetPretrained/..." backbone weights and
    "Caffe2Detectron/COCO/<model_id>/<model_name>" Detectron baselines.
    """
    S3_C2_DETECTRON_URL = "https://dl.fbaipublicfiles.com/detectron"
    C2_IMAGENET_MODELS = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
        "MSRA/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
        "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
        "MSRA/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
        "FAIR/20171220/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
    }
    C2_DETECTRON_SUFFIX = "output/train/{}coco_2014_train%3A{}coco_2014_valminusminival/generalized_rcnn/model_final.pkl"
    C2_DETECTRON_MODELS = {
        "35857197/e2e_faster_rcnn_R-50-C4_1x": "01_33_49.iAX0mXvW",
        "35857345/e2e_faster_rcnn_R-50-FPN_1x": "01_36_30.cUF7QR7I",
        "35857890/e2e_faster_rcnn_R-101-FPN_1x": "01_38_50.sNxI7sX7",
        "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "06_31_39.5MIHi1fZ",
        "35858791/e2e_mask_rcnn_R-50-C4_1x": "01_45_57.ZgkA7hPB",
        "35858933/e2e_mask_rcnn_R-50-FPN_1x": "01_48_14.DzEQe4wC",
        "35861795/e2e_mask_rcnn_R-101-FPN_1x": "02_31_37.KqyEK4tT",
        "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "06_35_59.RZotkLKI",
        "37129812/e2e_mask_rcnn_X-152-32x8d-FPN-IN5k_1.44x": "09_35_36.8pzTQKYK",
        # keypoints
        "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "08_42_54.kdzV35ao"
    }
    @staticmethod
    def get(name):
        """Dispatch on the name prefix; RuntimeError for unknown names."""
        if name.startswith("Caffe2Detectron/COCO"):
            return ModelCatalog.get_c2_detectron_12_2017_baselines(name)
        if name.startswith("ImageNetPretrained"):
            return ModelCatalog.get_c2_imagenet_pretrained(name)
        raise RuntimeError("model not present in the catalog {}".format(name))
    @staticmethod
    def get_c2_imagenet_pretrained(name):
        """Return the URL for an ImageNet-pretrained backbone.

        torchvision entries (resnet18/34) are already full URLs; the
        Detectron entries are joined onto S3_C2_DETECTRON_URL.
        """
        prefix = ModelCatalog.S3_C2_DETECTRON_URL
        name = name[len("ImageNetPretrained/") :]
        name = ModelCatalog.C2_IMAGENET_MODELS[name]
        if 'resnet34' in name or 'resnet18' in name:
            return name
        url = "/".join([prefix, name])
        return url
    @staticmethod
    def get_c2_detectron_12_2017_baselines(name):
        """Return the URL of a Detectron 12_2017 baseline checkpoint."""
        # Detectron C2 models are stored following the structure
        # prefix/<model_id>/2012_2017_baselines/<model_name>.yaml.<signature>/suffix
        # we use as identifiers in the catalog Caffe2Detectron/COCO/<model_id>/<model_name>
        prefix = ModelCatalog.S3_C2_DETECTRON_URL
        suffix = ModelCatalog.C2_DETECTRON_SUFFIX
        # remove identification prefix
        name = name[len("Caffe2Detectron/COCO/") :]
        # split in <model_id> and <model_name>
        model_id, model_name = name.split("/")
        # parsing to make it match the url address from the Caffe2 models
        model_name = "{}.yaml".format(model_name)
        signature = ModelCatalog.C2_DETECTRON_MODELS[name]
        unique_name = ".".join([model_name, signature])
        url = "/".join([prefix, model_id, "12_2017_baselines", unique_name, suffix])
        return url
| 11,140 | 45.810924 | 121 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/batch_norm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
class FrozenBatchNorm2d(nn.Module):
    """BatchNorm2d with statistics and affine parameters frozen as buffers.

    The four BatchNorm quantities are registered as (non-trainable)
    buffers, and the forward pass applies the equivalent per-channel
    affine transform ``x * scale + shift``.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        for buf_name, init_value in (
            ("weight", torch.ones(n)),
            ("bias", torch.zeros(n)),
            ("running_mean", torch.zeros(n)),
            ("running_var", torch.ones(n)),
        ):
            self.register_buffer(buf_name, init_value)

    def forward(self, x):
        # Match buffer precision to a half-precision input if necessary.
        if x.dtype == torch.float16:
            self.weight = self.weight.half()
            self.bias = self.bias.half()
            self.running_mean = self.running_mean.half()
            self.running_var = self.running_var.half()
        # Fold mean/var/gamma/beta into one per-channel affine transform.
        scale = self.weight * self.running_var.rsqrt()
        shift = self.bias - self.running_mean * scale
        return x * scale.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)
| 1,093 | 34.290323 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/roi_pool.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
from apex import amp
class _ROIPool(Function):
    """Autograd wrapper around the custom C/CUDA RoI max-pooling kernels."""
    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale):
        # Stash hyper-parameters and the input shape for the backward pass.
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        # `argmax` records which input element won each pooled cell so the
        # gradient can be routed back to it in `backward`.
        output, argmax = _C.roi_pool_forward(
            input, roi, spatial_scale, output_size[0], output_size[1]
        )
        ctx.save_for_backward(input, roi, argmax)
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, rois, argmax = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_pool_backward(
            grad_output,
            input,
            rois,
            argmax,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
        )
        # Gradient only w.r.t. `input`; the remaining forward arguments are
        # non-tensor hyper-parameters.
        return grad_input, None, None, None
roi_pool = _ROIPool.apply
class ROIPool(nn.Module):
    """RoI max-pooling layer delegating to the custom C/CUDA kernel."""

    def __init__(self, output_size, spatial_scale):
        super(ROIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    @amp.float_function
    def forward(self, input, rois):
        # amp casts inputs to fp32 before invoking the pooling kernel.
        return roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return "{}(output_size={}, spatial_scale={})".format(
            self.__class__.__name__, self.output_size, self.spatial_scale
        )
| 1,899 | 28.230769 | 74 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/roi_align.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
from apex import amp
class _ROIAlign(Function):
    """Autograd wrapper around the custom C/CUDA RoI-align kernels."""
    @staticmethod
    def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
        # Only the rois are needed as a tensor in backward; the rest are
        # plain hyper-parameters stashed on the context.
        ctx.save_for_backward(roi)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        ctx.input_shape = input.size()
        output = _C.roi_align_forward(
            input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
        )
        return output
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        rois, = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        sampling_ratio = ctx.sampling_ratio
        bs, ch, h, w = ctx.input_shape
        grad_input = _C.roi_align_backward(
            grad_output,
            rois,
            spatial_scale,
            output_size[0],
            output_size[1],
            bs,
            ch,
            h,
            w,
            sampling_ratio,
        )
        # Gradient only w.r.t. `input`; the remaining forward arguments are
        # non-tensor hyper-parameters.
        return grad_input, None, None, None, None
roi_align = _ROIAlign.apply
class ROIAlign(nn.Module):
    """Module wrapper for the `roi_align` functional (bilinear ROI pooling)."""

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    @amp.float_function
    def forward(self, input, rois):
        return roi_align(
            input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
        )

    def __repr__(self):
        attrs = "output_size={}, spatial_scale={}, sampling_ratio={}".format(
            self.output_size, self.spatial_scale, self.sampling_ratio
        )
        return self.__class__.__name__ + "(" + attrs + ")"
| 2,154 | 29.785714 | 85 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/smooth_l1_loss.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
# TODO maybe push this to nn?
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
    """Smooth-L1 (Huber-style) loss with a configurable `beta` threshold.

    Quadratic for |input - target| < beta and linear beyond; matches the
    PyTorch built-in smooth_l1_loss except for the extra `beta` parameter.

    Args:
        input, target: tensors of the same shape.
        beta: transition point between the quadratic and linear regimes.
        size_average: return the mean over elements if True, else the sum.
    """
    diff = torch.abs(input - target)
    quadratic = 0.5 * diff ** 2 / beta
    linear = diff - 0.5 * beta
    loss = torch.where(diff < beta, quadratic, linear)
    return loss.mean() if size_average else loss.sum()
| 481 | 27.352941 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import glob
import os.path
import torch
try:
from torch.utils.cpp_extension import load as load_ext
from torch.utils.cpp_extension import CUDA_HOME
except ImportError:
raise ImportError("The cpp layer extensions requires PyTorch 0.4 or higher")
def _load_C_extensions():
    """JIT-compile and load the project's C++/CUDA sources from ../csrc."""
    csrc_dir = os.path.dirname(os.path.abspath(__file__))
    csrc_dir = os.path.join(os.path.dirname(csrc_dir), "csrc")
    sources = glob.glob(os.path.join(csrc_dir, "*.cpp"))
    sources += glob.glob(os.path.join(csrc_dir, "cpu", "*.cpp"))
    extra_cflags = []
    # Only build the CUDA kernels when both a device and a toolkit exist.
    if torch.cuda.is_available() and CUDA_HOME is not None:
        sources += glob.glob(os.path.join(csrc_dir, "cuda", "*.cu"))
        extra_cflags = ["-DWITH_CUDA"]
    sources = [os.path.join(csrc_dir, path) for path in sources]
    return load_ext(
        "torchvision",
        sources,
        extra_cflags=extra_cflags,
        extra_include_paths=[csrc_dir],
    )


_C = _load_C_extensions()
| 1,165 | 28.15 | 80 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
helper class that supports empty tensors on some nn functions.
Ideally, add support directly in PyTorch to empty tensors in
those functions.
This can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
import math
import torch
from torch import nn
from torch.nn.modules.utils import _ntuple
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class Conv2d(torch.nn.Conv2d):
    """Conv2d that tolerates empty (zero-element) inputs by computing the
    would-be output shape analytically instead of calling the backend."""

    def forward(self, x):
        if x.numel() > 0:
            return super(Conv2d, self).forward(x)
        # Empty batch: derive H_out/W_out via the standard conv arithmetic.
        spatial = []
        for size, pad, dil, k, stride in zip(
            x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
        ):
            spatial.append((size + 2 * pad - (dil * (k - 1) + 1)) // stride + 1)
        output_shape = [x.shape[0], self.weight.shape[0]] + spatial
        return _NewEmptyTensorOp.apply(x, output_shape)
class ConvTranspose2d(torch.nn.ConvTranspose2d):
    """ConvTranspose2d that tolerates empty (zero-element) inputs.

    Fix: the empty-input path previously read ``self.bias.shape[0]`` to get
    the number of output channels, which raises when the layer is built with
    ``bias=False`` (bias is None); ``self.out_channels`` is used instead.
    """

    def forward(self, x):
        if x.numel() > 0:
            return super(ConvTranspose2d, self).forward(x)
        # Empty batch: derive output spatial dims from the transposed-conv
        # arithmetic (inverse of the Conv2d output-size formula).
        output_shape = [
            (i - 1) * d - 2 * p + (di * (k - 1) + 1) + op
            for i, p, di, k, d, op in zip(
                x.shape[-2:],
                self.padding,
                self.dilation,
                self.kernel_size,
                self.stride,
                self.output_padding,
            )
        ]
        output_shape = [x.shape[0], self.out_channels] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
def interpolate(
    input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
    """`torch.nn.functional.interpolate` that also handles empty inputs by
    returning an empty tensor of the correct output shape."""
    if input.numel() > 0:
        return torch.nn.functional.interpolate(
            input, size, scale_factor, mode, align_corners
        )

    def _check_size_scale_factor(dim):
        # Exactly one of size / scale_factor must be supplied.
        if size is None and scale_factor is None:
            raise ValueError("either size or scale_factor should be defined")
        if size is not None and scale_factor is not None:
            raise ValueError("only one of size or scale_factor should be defined")
        bad_tuple = (
            scale_factor is not None
            and isinstance(scale_factor, tuple)
            and len(scale_factor) != dim
        )
        if bad_tuple:
            raise ValueError(
                "scale_factor shape must match input shape. "
                "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
            )

    def _output_size(dim):
        _check_size_scale_factor(dim)
        if size is not None:
            return size
        factors = _ntuple(dim)(scale_factor)
        # math.floor might return float in py2.7
        return [
            int(math.floor(input.size(idx + 2) * factors[idx])) for idx in range(dim)
        ]

    return _NewEmptyTensorOp.apply(input, input.shape[:-2] + tuple(_output_size(2)))
class DFConv2d(nn.Module):
    """Deformable convolutional layer (plain or modulated DCN).

    An internal regular conv predicts per-location offsets (and, for
    modulated DCN, modulation masks) which feed the deformable conv kernel.

    Fixes:
    - forward() previously hard-coded the offset/mask channel split as
      ``[:, :18]`` / ``[:, -9:]``, which is only correct for a 3x3 kernel
      with deformable_groups=1; the split is now derived from the kernel
      size and number of deformable groups.
    - the empty-input shape path zipped over ``self.padding`` etc., which
      are scalars here; they are normalized to pairs first.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        with_modulated_dcn=True,
        kernel_size=3,
        stride=1,
        groups=1,
        dilation=1,
        deformable_groups=1,
        bias=False
    ):
        super(DFConv2d, self).__init__()
        if isinstance(kernel_size, (list, tuple)):
            assert len(kernel_size) == 2
            offset_base_channels = kernel_size[0] * kernel_size[1]
        else:
            offset_base_channels = kernel_size * kernel_size
        if with_modulated_dcn:
            from maskrcnn_benchmark.layers import ModulatedDeformConv
            # x-offset, y-offset and mask logit per kernel location.
            offset_channels = offset_base_channels * 3  # default: 27
            conv_block = ModulatedDeformConv
        else:
            from maskrcnn_benchmark.layers import DeformConv
            # x-offset and y-offset per kernel location.
            offset_channels = offset_base_channels * 2  # default: 18
            conv_block = DeformConv
        self.offset = Conv2d(
            in_channels,
            deformable_groups * offset_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=dilation,
            groups=1,
            dilation=dilation
        )
        # Zero-init the offset predictor so training starts as a plain conv.
        for l in [self.offset, ]:
            nn.init.kaiming_uniform_(l.weight, a=1)
            torch.nn.init.constant_(l.bias, 0.)
        self.conv = conv_block(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            groups=groups,
            deformable_groups=deformable_groups,
            bias=bias
        )
        self.with_modulated_dcn = with_modulated_dcn
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = dilation
        self.dilation = dilation
        # Number of (x, y) offset channels preceding the mask channels in the
        # modulated-DCN prediction (18 for the 3x3 / 1-group default).
        self.offset_split = offset_base_channels * deformable_groups * 2

    def forward(self, x):
        if x.numel() > 0:
            if not self.with_modulated_dcn:
                offset = self.offset(x)
                x = self.conv(x, offset)
            else:
                offset_mask = self.offset(x)
                offset = offset_mask[:, :self.offset_split, :, :]
                mask = offset_mask[:, self.offset_split:, :, :].sigmoid()
                x = self.conv(x, offset, mask)
            return x
        # Empty input: compute the output shape without running the kernels.
        # Normalize the scalar conv params to (h, w) pairs before zipping.
        pads = _ntuple(2)(self.padding)
        dils = _ntuple(2)(self.dilation)
        kers = _ntuple(2)(self.kernel_size)
        strs = _ntuple(2)(self.stride)
        output_shape = [
            (i + 2 * p - (di * (k - 1) + 1)) // d + 1
            for i, p, di, k, d in zip(x.shape[-2:], pads, dils, kers, strs)
        ]
        output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
| 6,035 | 31.451613 | 88 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .batch_norm import FrozenBatchNorm2d
from .misc import Conv2d
from .misc import DFConv2d
from .misc import ConvTranspose2d
from .misc import interpolate
from .nms import nms
from .roi_align import ROIAlign
from .roi_align import roi_align
from .roi_pool import ROIPool
from .roi_pool import roi_pool
from .smooth_l1_loss import smooth_l1_loss
from .dcn.deform_conv_func import deform_conv, modulated_deform_conv
from .dcn.deform_conv_module import DeformConv, ModulatedDeformConv, ModulatedDeformConvPack
from .dcn.deform_pool_func import deform_roi_pooling
from .dcn.deform_pool_module import DeformRoIPooling, DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack
# Public re-exports of the compiled ops and helper layers.
# NOTE: "BatchNorm2d" was previously listed here but is never imported or
# defined in this module, so `from maskrcnn_benchmark.layers import *`
# raised AttributeError; the dead export has been removed.
__all__ = [
    "nms",
    "roi_align",
    "ROIAlign",
    "roi_pool",
    "ROIPool",
    "smooth_l1_loss",
    "Conv2d",
    "DFConv2d",
    "ConvTranspose2d",
    "interpolate",
    "FrozenBatchNorm2d",
    'deform_conv',
    'modulated_deform_conv',
    'DeformConv',
    'ModulatedDeformConv',
    'ModulatedDeformConvPack',
    'deform_roi_pooling',
    'DeformRoIPooling',
    'DeformRoIPoolingPack',
    'ModulatedDeformRoIPoolingPack',
]
# __all__ = ["nms", "roi_align", "ROIAlign", "roi_pool", "ROIPool", "smooth_l1_loss", "Conv2d", "ConvTranspose2d", "interpolate", "FrozenBatchNorm2d"]
| 1,372 | 30.930233 | 150 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_conv_func.py | import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from maskrcnn_benchmark import _C
class DeformConvFunction(Function):
    """Autograd binding for the (non-modulated) deformable convolution ops.

    The heavy lifting happens in the `_C` CUDA extension; this class just
    normalizes the hyper-parameters, allocates outputs/buffers, and wires
    the gradient calls. CPU execution is not implemented.
    """

    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        weight,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        im2col_step=64
    ):
        # Only 4D NCHW inputs are supported by the kernels.
        if input is not None and input.dim() != 4:
            raise ValueError(
                "Expected 4D tensor as input, got {}D tensor instead.".format(
                    input.dim()))
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step
        ctx.save_for_backward(input, offset, weight)
        output = input.new_empty(
            DeformConvFunction._output_size(input, weight, ctx.padding,
                                            ctx.dilation, ctx.stride))
        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones
        if not input.is_cuda:
            # CPU kernels are not implemented for deformable conv.
            raise NotImplementedError
        else:
            # im2col processes the batch in chunks; the chunk size must
            # evenly divide the batch size.
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            # NOTE: kernel width (size(3)) is passed before height (size(2)),
            # and the w-component of stride/padding/dilation before the
            # h-component — this ordering is what the C API expects.
            _C.deform_conv_forward(
                input,
                weight,
                offset,
                output,
                ctx.bufs_[0],
                ctx.bufs_[1],
                weight.size(3),
                weight.size(2),
                ctx.stride[1],
                ctx.stride[0],
                ctx.padding[1],
                ctx.padding[0],
                ctx.dilation[1],
                ctx.dilation[0],
                ctx.groups,
                ctx.deformable_groups,
                cur_im2col_step
            )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, offset, weight = ctx.saved_tensors
        grad_input = grad_offset = grad_weight = None
        if not grad_output.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            # Gradients w.r.t. the input feature map and the offset field.
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                _C.deform_conv_backward_input(
                    input,
                    offset,
                    grad_output,
                    grad_input,
                    grad_offset,
                    weight,
                    ctx.bufs_[0],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    cur_im2col_step
                )
            # Gradient w.r.t. the convolution weights.
            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                _C.deform_conv_backward_parameters(
                    input,
                    offset,
                    grad_output,
                    grad_weight,
                    ctx.bufs_[0],
                    ctx.bufs_[1],
                    weight.size(3),
                    weight.size(2),
                    ctx.stride[1],
                    ctx.stride[0],
                    ctx.padding[1],
                    ctx.padding[0],
                    ctx.dilation[1],
                    ctx.dilation[0],
                    ctx.groups,
                    ctx.deformable_groups,
                    1,
                    cur_im2col_step
                )
        # Non-tensor forward args (stride, padding, ...) receive no gradient.
        return (grad_input, grad_offset, grad_weight, None, None, None, None, None)

    @staticmethod
    def _output_size(input, weight, padding, dilation, stride):
        """Standard conv output-shape arithmetic; raises if any dim collapses."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = padding[d]
            kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = stride[d]
            output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError(
                "convolution input is too small (output would be {})".format(
                    'x'.join(map(str, output_size))))
        return output_size
class ModulatedDeformConvFunction(Function):
    """Autograd binding for modulated deformable convolution (DCN v2).

    In addition to offsets, each sampling location is scaled by a
    `mask` tensor. CPU execution is not implemented.
    """

    @staticmethod
    def forward(
        ctx,
        input,
        offset,
        mask,
        weight,
        bias=None,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1
    ):
        # NOTE: unlike DeformConvFunction, stride/padding/dilation are kept
        # as scalars here (the kernel receives the same value for h and w).
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = bias is not None
        if not ctx.with_bias:
            bias = input.new_empty(1) # fake tensor
        if not input.is_cuda:
            # CPU kernels are not implemented.
            raise NotImplementedError
        # Tensors are only saved when some input needs a gradient.
        # NOTE(review): backward unconditionally reads ctx.saved_tensors, so
        # it relies on this branch having been taken during training.
        if weight.requires_grad or mask.requires_grad or offset.requires_grad \
                or input.requires_grad:
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(
            ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        _C.modulated_deform_conv_forward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            output,
            ctx._bufs[1],
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        input, offset, mask, weight, bias = ctx.saved_tensors
        # All gradients are computed in one fused C call.
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        _C.modulated_deform_conv_backward(
            input,
            weight,
            bias,
            ctx._bufs[0],
            offset,
            mask,
            ctx._bufs[1],
            grad_input,
            grad_weight,
            grad_bias,
            grad_offset,
            grad_mask,
            grad_output,
            weight.shape[2],
            weight.shape[3],
            ctx.stride,
            ctx.stride,
            ctx.padding,
            ctx.padding,
            ctx.dilation,
            ctx.dilation,
            ctx.groups,
            ctx.deformable_groups,
            ctx.with_bias
        )
        # Drop the gradient computed for the fake bias tensor.
        if not ctx.with_bias:
            grad_bias = None
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
                None, None, None, None, None)

    @staticmethod
    def _infer_shape(ctx, input, weight):
        """Standard conv output-shape arithmetic using the scalar hyper-params."""
        n = input.size(0)
        channels_out = weight.size(0)
        height, width = input.shape[2:4]
        kernel_h, kernel_w = weight.shape[2:4]
        height_out = (height + 2 * ctx.padding -
                      (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1
        width_out = (width + 2 * ctx.padding -
                     (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1
        return n, channels_out, height_out, width_out


# Functional aliases used by the nn.Module wrappers.
deform_conv = DeformConvFunction.apply
modulated_deform_conv = ModulatedDeformConvFunction.apply
| 8,309 | 30.596958 | 83 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_pool_func.py | import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from maskrcnn_benchmark import _C
class DeformRoIPoolingFunction(Function):
    """Autograd binding for (modulated) deformable PS-ROI pooling.

    When `no_trans` is True the `offset` tensor is expected to be empty and
    the op reduces to plain position-sensitive ROI pooling. CPU execution is
    not implemented.
    """

    @staticmethod
    def forward(
        ctx,
        data,
        rois,
        offset,
        spatial_scale,
        out_size,
        out_channels,
        no_trans,
        group_size=1,
        part_size=None,
        sample_per_part=4,
        trans_std=.0
    ):
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        # part_size defaults to the pooled output size.
        ctx.part_size = out_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std
        assert 0.0 <= ctx.trans_std <= 1.0
        if not data.is_cuda:
            # CPU kernels are not implemented.
            raise NotImplementedError
        n = rois.shape[0]
        output = data.new_empty(n, out_channels, out_size, out_size)
        # Per-bin sample counts produced by the kernel; needed by backward.
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        _C.deform_psroi_pooling_forward(
            data,
            rois,
            offset,
            output,
            output_count,
            ctx.no_trans,
            ctx.spatial_scale,
            ctx.out_channels,
            ctx.group_size,
            ctx.out_size,
            ctx.part_size,
            ctx.sample_per_part,
            ctx.trans_std
        )
        # Save tensors only when a gradient will be requested.
        # NOTE(review): backward unconditionally reads ctx.saved_tensors, so
        # it relies on this branch having been taken during training.
        if data.requires_grad or rois.requires_grad or offset.requires_grad:
            ctx.save_for_backward(data, rois, offset)
        ctx.output_count = output_count
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        data, rois, offset = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        # ROI coordinates receive no gradient.
        grad_rois = None
        grad_offset = torch.zeros_like(offset)
        _C.deform_psroi_pooling_backward(
            grad_output,
            data,
            rois,
            offset,
            output_count,
            grad_input,
            grad_offset,
            ctx.no_trans,
            ctx.spatial_scale,
            ctx.out_channels,
            ctx.group_size,
            ctx.out_size,
            ctx.part_size,
            ctx.sample_per_part,
            ctx.trans_std
        )
        # One None per non-tensor forward argument.
        return (grad_input, grad_rois, grad_offset, None, None, None, None, None, None, None, None)


# Functional alias used by the nn.Module wrappers.
deform_roi_pooling = DeformRoIPoolingFunction.apply
| 2,595 | 26.041667 | 99 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_pool_module.py | from torch import nn
from .deform_pool_func import deform_roi_pooling
class DeformRoIPooling(nn.Module):
    """Base module for (modulated) deformable ROI pooling.

    Holds the pooling configuration and forwards to the functional op;
    subclasses learn the offsets (and masks) themselves.
    """

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0):
        super(DeformRoIPooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.out_size = out_size
        self.out_channels = out_channels
        self.no_trans = no_trans
        self.group_size = group_size
        # part_size defaults to the pooled output size.
        self.part_size = part_size if part_size is not None else out_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std

    def forward(self, data, rois, offset):
        if self.no_trans:
            # No learned transform: hand the op an empty offset tensor.
            offset = data.new_empty(0)
        return deform_roi_pooling(
            data, rois, offset, self.spatial_scale, self.out_size,
            self.out_channels, self.no_trans, self.group_size, self.part_size,
            self.sample_per_part, self.trans_std)
class DeformRoIPoolingPack(DeformRoIPooling):
    """DeformRoIPooling that learns its own per-bin offsets.

    A first, offset-free pooling pass produces features from which a small
    fc head predicts (x, y) offsets; a second pass then pools with them.
    """

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 deform_fc_channels=1024):
        super(DeformRoIPoolingPack,
              self).__init__(spatial_scale, out_size, out_channels, no_trans,
                             group_size, part_size, sample_per_part, trans_std)
        self.deform_fc_channels = deform_fc_channels
        if not no_trans:
            # Predicts 2 values (x, y offset) per output bin.
            self.offset_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 2))
            # Zero init: training starts as plain (offset-free) pooling.
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()

    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            # No learned offsets: single offset-free pooling pass.
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # Pass 1: offset-free pooling to get features for offset prediction.
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            # Pass 2: pool again using the predicted offsets.
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
    """DeformRoIPooling that learns per-bin offsets AND a modulation mask.

    Like DeformRoIPoolingPack, but a second fc head predicts a sigmoid mask
    that rescales the offset-pooled features elementwise.
    """

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(
            spatial_scale, out_size, out_channels, no_trans, group_size,
            part_size, sample_per_part, trans_std)
        self.deform_fc_channels = deform_fc_channels
        if not no_trans:
            # Offset head: 2 values (x, y) per output bin; zero-initialized
            # so training starts as plain pooling.
            self.offset_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 2))
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()
            # Mask head: 1 sigmoid value per output bin; zero init gives
            # sigmoid(0) = 0.5 everywhere at the start of training.
            self.mask_fc = nn.Sequential(
                nn.Linear(self.out_size * self.out_size * self.out_channels,
                          self.deform_fc_channels),
                nn.ReLU(inplace=True),
                nn.Linear(self.deform_fc_channels,
                          self.out_size * self.out_size * 1),
                nn.Sigmoid())
            self.mask_fc[2].weight.data.zero_()
            self.mask_fc[2].bias.data.zero_()

    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            # No learned transform: single offset-free pooling pass.
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            n = rois.shape[0]
            offset = data.new_empty(0)
            # Pass 1: offset-free pooling for offset/mask prediction.
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            mask = self.mask_fc(x.view(n, -1))
            mask = mask.view(n, 1, self.out_size, self.out_size)
            # Pass 2: offset pooling, modulated by the predicted mask.
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std) * mask
| 6,307 | 40.774834 | 79 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/layers/dcn/deform_conv_module.py | import math
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from .deform_conv_func import deform_conv, modulated_deform_conv
class DeformConv(nn.Module):
    """Deformable convolution (DCN v1): sampling locations are shifted by an
    externally predicted `offset` field passed to forward().

    Fix: the divisibility assertion messages read "cannot be divisible by",
    which states the opposite of the failing condition; they now read
    "is not divisible by".
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        bias=False
    ):
        # The underlying kernel has no bias term.
        assert not bias
        super(DeformConv, self).__init__()
        self.with_bias = bias
        assert in_channels % groups == 0, \
            'in_channels {} is not divisible by groups {}'.format(
                in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} is not divisible by groups {}'.format(
                out_channels, groups)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // self.groups,
                         *self.kernel_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init with bound 1/sqrt(fan_in), matching classic conv init."""
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, input, offset):
        return deform_conv(input, offset, self.weight, self.stride,
                           self.padding, self.dilation, self.groups,
                           self.deformable_groups)

    def __repr__(self):
        return "".join([
            "{}(".format(self.__class__.__name__),
            "in_channels={}, ".format(self.in_channels),
            "out_channels={}, ".format(self.out_channels),
            "kernel_size={}, ".format(self.kernel_size),
            "stride={}, ".format(self.stride),
            "dilation={}, ".format(self.dilation),
            "padding={}, ".format(self.padding),
            "groups={}, ".format(self.groups),
            "deformable_groups={}, ".format(self.deformable_groups),
            "bias={})".format(self.with_bias),
        ])
class ModulatedDeformConv(nn.Module):
    """Modulated deformable convolution (DCN v2): besides offsets, each
    sampling point is scaled by an externally predicted modulation mask."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        deformable_groups=1,
        bias=True
    ):
        super(ModulatedDeformConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.with_bias = bias
        self.weight = nn.Parameter(torch.Tensor(
            out_channels, in_channels // groups, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Classic 1/sqrt(fan_in) uniform init; bias starts at zero.
        fan_in = self.in_channels
        for k in self.kernel_size:
            fan_in *= k
        bound = 1. / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, input, offset, mask):
        return modulated_deform_conv(
            input, offset, mask, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups, self.deformable_groups)

    def __repr__(self):
        parts = [
            "in_channels={}".format(self.in_channels),
            "out_channels={}".format(self.out_channels),
            "kernel_size={}".format(self.kernel_size),
            "stride={}".format(self.stride),
            "dilation={}".format(self.dilation),
            "padding={}".format(self.padding),
            "groups={}".format(self.groups),
            "deformable_groups={}".format(self.deformable_groups),
            "bias={}".format(self.with_bias),
        ]
        return "{}({})".format(self.__class__.__name__, ", ".join(parts))
class ModulatedDeformConvPack(ModulatedDeformConv):
    """ModulatedDeformConv that predicts its own offsets and masks from the
    input via an internal regular convolution (zero-initialized, so the
    layer starts out behaving like a plain convolution)."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 deformable_groups=1,
                 bias=True):
        super(ModulatedDeformConvPack, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            groups, deformable_groups, bias)
        # 3 channels per kernel location: x-offset, y-offset, mask logit.
        pred_channels = (self.deformable_groups * 3 *
                         self.kernel_size[0] * self.kernel_size[1])
        self.conv_offset_mask = nn.Conv2d(
            self.in_channels // self.groups,
            pred_channels,
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
            bias=True)
        self.init_offset()

    def init_offset(self):
        # Zero init => zero offsets and 0.5 masks at the start of training.
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

    def forward(self, input):
        prediction = self.conv_offset_mask(input)
        off_x, off_y, mask_logit = torch.chunk(prediction, 3, dim=1)
        offset = torch.cat((off_x, off_y), dim=1)
        mask = torch.sigmoid(mask_logit)
        return modulated_deform_conv(
            input, offset, mask, self.weight, self.bias, self.stride,
            self.padding, self.dilation, self.groups, self.deformable_groups)
| 5,802 | 31.601124 | 78 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/engine/text_inference.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import os
import pickle
import subprocess
import time
import cv2
import numpy as np
import torch
from maskrcnn_benchmark.utils.chars import char2num, get_tight_rect, getstr_grid
from PIL import Image, ImageDraw
from tqdm import tqdm
from ..utils.comm import is_main_process, scatter_gather, synchronize
import pdb
# TO DO: format output with dictionnary
def compute_on_dataset(model, data_loader, device, cfg):
    """Run inference over a data loader and collect raw predictions.

    NOTE(review): results are keyed by ``image_paths[0]``, so this assumes a
    test-time batch size of 1 — confirm against the loader configuration.

    Returns:
        results_dict: maps image path -> list of prediction artifacts; the
            layout depends on the config (global detections only, or global
            detections plus char-mask / sequence-recognition outputs).
        seg_results: per-batch segmentation proposals/polygons/scores; only
            populated when cfg.MODEL.SEG_ON is set.
    """
    model.eval()
    results_dict = {}
    seg_results = []
    cpu_device = torch.device("cpu")
    total_time = 0  # unused accumulator
    for _, batch in tqdm(enumerate(data_loader)):
        # targets are unused at inference time.
        images, targets, image_paths = batch
        images = images.to(device)
        with torch.no_grad():
            if cfg.MODEL.SEG_ON:
                # Segmentation-proposal pipeline: the model also returns the
                # proposals and a dict of segmentation outputs.
                predictions, proposals, seg_results_dict = model(
                    images
                )
                seg_results.append(
                    [image_paths, proposals, seg_results_dict['rotated_boxes'], seg_results_dict['polygons'], seg_results_dict['preds'], seg_results_dict['scores']]
                )
                # if cfg.MODEL.MASK_ON and predictions is not None:
                if predictions is not None:
                    if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
                        # Recognition heads enabled: keep char masks and
                        # sequence outputs alongside the global detections.
                        global_predictions = predictions[0]
                        char_predictions = predictions[1]
                        char_mask = char_predictions["char_mask"]
                        boxes = char_predictions["boxes"]
                        seq_words = char_predictions["seq_outputs"]
                        seq_scores = char_predictions["seq_scores"]
                        detailed_seq_scores = char_predictions["detailed_seq_scores"]
                        # Move detections to CPU before storing.
                        global_predictions = [o.to(cpu_device) for o in global_predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                    char_mask,
                                    boxes,
                                    seq_words,
                                    seq_scores,
                                    detailed_seq_scores,
                                ]
                            }
                        )
                    else:
                        # Detection only.
                        global_predictions = [o.to(cpu_device) for o in predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                ]
                            }
                        )
            else:
                # Plain pipeline without the segmentation-proposal stage.
                predictions = model(images)
                if predictions is not None:
                    if not (cfg.MODEL.CHAR_MASK_ON and cfg.SEQUENCE.SEQ_ON):
                        # Detection only.
                        global_predictions = predictions
                        global_predictions = [o.to(cpu_device) for o in global_predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                ]
                            }
                        )
                    else:
                        # Both recognition heads enabled.
                        global_predictions = predictions[0]
                        char_predictions = predictions[1]
                        if cfg.MODEL.CHAR_MASK_ON:
                            char_mask = char_predictions["char_mask"]
                        else:
                            char_mask = None
                        boxes = char_predictions["boxes"]
                        seq_words = char_predictions["seq_outputs"]
                        seq_scores = char_predictions["seq_scores"]
                        detailed_seq_scores = char_predictions["detailed_seq_scores"]
                        global_predictions = [o.to(cpu_device) for o in global_predictions]
                        results_dict.update(
                            {
                                image_paths[0]: [
                                    global_predictions[0],
                                    char_mask,
                                    boxes,
                                    seq_words,
                                    seq_scores,
                                    detailed_seq_scores,
                                ]
                            }
                        )
    return results_dict, seg_results
def polygon2rbox(polygon, image_height, image_width):
    """Convert a polygon (flat x,y coordinate list) into its minimum-area
    rectangle, clipped to the image and returned as a flat list of ints."""
    points = np.array(polygon).reshape((-1, 2))
    min_rect = cv2.minAreaRect(points)
    corners = np.array(cv2.boxPoints(min_rect), dtype="int")
    rect = get_tight_rect(corners, 0, 0, image_height, image_width, 1)
    return [int(v) for v in rect]
def mask2polygon(mask, box, im_size, threshold=0.5, output_folder=None):
    """Convert a soft instance mask into a polygon in image coordinates.

    The mask is resized to the box, blurred and binarized, then traced
    either as an approximated contour (curved-text datasets, detected via
    the output folder name) or reduced to a tight min-area rectangle
    (other datasets). Returns a flat list of int coordinates, or None when
    no usable region is found.
    """
    # mask 32*128
    image_width, image_height = im_size[0], im_size[1]
    box_h = box[3] - box[1]
    box_w = box[2] - box[0]
    cls_polys = (mask * 255).astype(np.uint8)
    # Resize the mask probabilities to the detection box and re-binarize.
    poly_map = np.array(Image.fromarray(cls_polys).resize((box_w, box_h)))
    poly_map = poly_map.astype(np.float32) / 255
    poly_map = cv2.GaussianBlur(poly_map, (3, 3), sigmaX=3)
    ret, poly_map = cv2.threshold(poly_map, threshold, 1, cv2.THRESH_BINARY)
    if "total_text" in output_folder or "cute80" in output_folder:
        # Curved-text datasets: keep the full (possibly curved) outline.
        SE1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        poly_map = cv2.erode(poly_map, SE1)
        poly_map = cv2.dilate(poly_map, SE1)
        poly_map = cv2.morphologyEx(poly_map, cv2.MORPH_CLOSE, SE1)
        # findContours returns 3 values on OpenCV 3.x but 2 on 2.x/4.x.
        try:
            _, contours, _ = cv2.findContours(
                (poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE
            )
        except:
            contours, _ = cv2.findContours(
                (poly_map * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE
            )
        if len(contours) == 0:
            # print(contours)
            # print(len(contours))
            return None
        # Keep only the largest-area contour.
        max_area = 0
        max_cnt = contours[0]
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > max_area:
                max_area = area
                max_cnt = cnt
        # perimeter = cv2.arcLength(max_cnt, True)
        # Simplify the outline to ~1% of its perimeter.
        epsilon = 0.01 * cv2.arcLength(max_cnt, True)
        approx = cv2.approxPolyDP(max_cnt, epsilon, True)
        pts = approx.reshape((-1, 2))
        # Shift from box-local to absolute image coordinates.
        pts[:, 0] = pts[:, 0] + box[0]
        pts[:, 1] = pts[:, 1] + box[1]
        polygon = list(pts.reshape((-1,)))
        polygon = list(map(int, polygon))
        if len(polygon) < 6:
            # Fewer than 3 points is not a valid polygon.
            return None
    else:
        # Other datasets: reduce the mask to a tight min-area rectangle.
        SE1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        poly_map = cv2.erode(poly_map, SE1)
        poly_map = cv2.dilate(poly_map, SE1)
        poly_map = cv2.morphologyEx(poly_map, cv2.MORPH_CLOSE, SE1)
        idy, idx = np.where(poly_map == 1)
        xy = np.vstack((idx, idy))
        xy = np.transpose(xy)
        hull = cv2.convexHull(xy, clockwise=True)
        # reverse order of points.
        if hull is None:
            return None
        hull = hull[::-1]
        # find minimum area bounding box.
        rect = cv2.minAreaRect(hull)
        corners = cv2.boxPoints(rect)
        corners = np.array(corners, dtype="int")
        pts = get_tight_rect(corners, box[0], box[1], image_height, image_width, 1)
        polygon = [x * 1.0 for x in pts]
        polygon = list(map(int, polygon))
    return polygon
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
    """Gather the per-GPU prediction dicts and merge them on the main process.

    Returns the merged dict on the main process and None elsewhere.
    """
    gathered = scatter_gather(predictions_per_gpu)
    if not is_main_process():
        return
    merged = {}
    for partial in gathered:
        merged.update(partial)
    return merged
def format_output(out_dir, boxes, img_name):
    """Write per-image detection/recognition results to ``res_<img>.txt``.

    Each output line encodes one box; the per-character score arrays are
    pickled to a sibling ``.pkl`` file whose path is appended to the line.
    """
    with open(
        os.path.join(out_dir, "res_" + img_name.split(".")[0] + ".txt"), "wt"
    ) as res:
        ## char score save dir
        ssur_name = os.path.join(out_dir, "res_" + img_name.split(".")[0])
        for i, box in enumerate(boxes):
            save_name = ssur_name + "_" + str(i) + ".pkl"
            save_dict = {}
            if "total_text" in out_dir or "cute80" in out_dir:
                # np.save(save_name, box[-2])
                # Curved-text layout: box[-1] holds the polygon length used
                # below to split the rectangle / polygon / word fields;
                # box[-3] and box[-2] carry the per-character score arrays.
                save_dict["seg_char_scores"] = box[-3]
                save_dict["seq_char_scores"] = box[-2]
                box = (
                    ",".join([str(x) for x in box[:4]])
                    + ";"
                    + ",".join([str(x) for x in box[4 : 4 + int(box[-1])]])
                    + ";"
                    + ",".join([str(x) for x in box[4 + int(box[-1]) : -3]])
                    + ","
                    + save_name
                )
            else:
                # Quadrilateral layout: the last two entries are the score
                # arrays; everything before them is written inline.
                save_dict["seg_char_scores"] = box[-2]
                save_dict["seq_char_scores"] = box[-1]
                # NOTE(review): np.save appends ".npy" to save_name, writing
                # a file redundant with the pickle below — confirm intent.
                np.save(save_name, box[-1])
                box = ",".join([str(x) for x in box[:-2]]) + "," + save_name
            with open(save_name, "wb") as f:
                # protocol=2 keeps the pickle readable from Python 2 tools.
                pickle.dump(save_dict, f, protocol=2)
            res.write(box + "\n")
def format_seg_output(results_dir, rotated_boxes_this_image, polygons_this_image, scores, img_name, ratio):
    """Write segmentation detections for one image to ``res_<name>.txt``.

    Each output line is a comma-separated list of integer polygon coordinates,
    rescaled from network resolution back to the original image, followed by
    the detection score.

    Args:
        results_dir: destination directory; if its path contains "total_text"
            or "cute80" the polygons are written, otherwise the rotated boxes.
        rotated_boxes_this_image: iterable of (N, 2) point arrays.
        polygons_this_image: iterable whose elements hold a flat coordinate
            array at index 0 ([x0, y0, x1, y1, ...]).
        scores: per-detection scores; each element must support ``.item()``.
        img_name: source image file name, used to derive the result file name.
        ratio: (height_ratio, width_ratio) scale factors back to image size.
    """
    height_ratio, width_ratio = ratio
    out_path = os.path.join(results_dir, "res_" + img_name.split(".")[0] + ".txt")
    with open(out_path, "wt") as res:
        if "total_text" in results_dir or "cute80" in results_dir:
            for i, box in enumerate(polygons_this_image):
                box = box[0]
                # flat [x0, y0, x1, y1, ...]: even slots are x, odd slots are y
                box[0::2] = box[0::2] * width_ratio
                box[1::2] = box[1::2] * height_ratio
                result = ",".join([str(int(x)) for x in box])
                res.write(result + ',' + str(scores[i].item()) + "\n")
        else:
            for i, box in enumerate(rotated_boxes_this_image):
                # NOTE(review): box is an (N, 2) point array here, so these
                # strided slices scale alternate *points* (rows), not the x/y
                # coordinate columns -- looks suspicious; confirm the intended
                # layout before changing, as output files depend on it.
                box[0::2] = box[0::2] * width_ratio
                box[1::2] = box[1::2] * height_ratio
                result = ",".join([str(int(x[0])) + ',' + str(int(x[1])) for x in box])
                res.write(result + ',' + str(scores[i].item()) + "\n")
def process_char_mask(char_masks, boxes, threshold=192):
    """Decode per-instance character masks into words and per-char scores.

    Runs ``getstr_grid`` on each instance's mask crop and collects the decoded
    text, its score, the per-character scores, and the character polygons.
    Returns four parallel lists, one entry per instance.
    """
    texts = []
    rec_scores = []
    rec_char_scores = []
    char_polygons = []
    for idx in range(char_masks.shape[0]):
        int_box = list(map(int, list(boxes[idx])))
        text, score, char_score, polygon = getstr_grid(
            char_masks[idx, :, :, :].copy(), int_box, threshold=threshold
        )
        texts.append(text)
        rec_scores.append(score)
        rec_char_scores.append(char_score)
        char_polygons.append(polygon)
    return texts, rec_scores, rec_char_scores, char_polygons
def creat_color_map(n_class, width):
    """Build a list of at least ``n_class`` distinct RGBA tuples.

    Channel values are sampled on a cube-root grid of ``splits`` levels over
    [0, width]; the blue axis uses one level fewer than red/green, matching
    the original sampling.  Alpha is fixed at 200.
    """
    splits = int(np.ceil(np.power(n_class * 1.0, 1.0 / 3)))

    def _scale(idx):
        # evenly spread channel value over [0, width]
        return int(idx * width * 1.0 / (splits - 1))

    colors = []
    for ri in range(splits):
        red = _scale(ri)
        for gi in range(splits):
            green = _scale(gi)
            for bi in range(splits - 1):
                colors.append((red, green, _scale(bi), 200))
    return colors
def visualization(image, polygons, resize_ratio, colors, char_polygons=None, words=None):
    """Draw detected text polygons onto ``image`` in place.

    Only the word polygons are drawn; the per-character overlay (which would
    use ``char_polygons``, ``words``, ``colors`` and ``resize_ratio``) is
    currently disabled, so those arguments are accepted but unused.  Note that
    each list in ``polygons`` is mutated: its first vertex is re-appended to
    close the outline.
    """
    draw = ImageDraw.Draw(image, "RGBA")
    outline = '#33FF33'
    for poly in polygons:
        # close the polyline by repeating the first (x, y) pair
        poly.extend(poly[:2])
        draw.line(poly, fill=outline, width=5)
def vis_seg_map(image_path, seg_map, rotated_boxes, polygons_this_image, proposals, vis_dir):
    """Save a debug image blending the segmentation probability map with the
    input image and the detected polygons/rotated boxes drawn on top.

    The result is written to ``<vis_dir>/seg_<image name>``.
    """
    img_name = image_path.split("/")[-1]
    image = cv2.imread(image_path)
    height, width, _ = image.shape
    seg_map = seg_map.data.cpu().numpy()
    # NOTE: cv2 loads BGR; it is wrapped as-is, so channels are not swapped here
    img = Image.fromarray(image).convert("RGB")
    # crop the map to the proposal extent (network resolution), scale
    # probabilities to 0-255, then resize up to the original image size
    seg_image = (
        Image.fromarray((seg_map[0, :proposals.size[1], :proposals.size[0]] * 255).astype(np.uint8))
        .convert("RGB")
        .resize((width, height))
    )
    # 50/50 blend of the segmentation heat map and the input image
    visu_image = Image.blend(seg_image, img, 0.5)
    img_draw = ImageDraw.Draw(visu_image)
    if "total_text" in vis_dir or "cute80" in vis_dir:
        # polygons: each entry wraps a flat coordinate array at index 0
        for box in polygons_this_image:
            tuple_box = [tuple(x) for x in box[0].reshape(-1, 2).tolist()]
            tuple_box.append(tuple_box[0])  # close the outline
            img_draw.line(tuple_box, fill=(0, 255, 0), width=5)
    else:
        # rotated boxes: each entry is already an (N, 2) point array
        for box in rotated_boxes:
            tuple_box = [tuple(x) for x in box.tolist()]
            tuple_box.append(tuple_box[0])  # close the outline
            img_draw.line(tuple_box, fill=(0, 255, 0), width=5)
    visu_image.save(vis_dir + "/seg_" + img_name)
def prepare_results_for_evaluation(
    predictions, output_folder, model_name, seg_predictions=None, vis=False, cfg=None
):
    """Convert raw network outputs into the per-image result files expected by
    the benchmark evaluation scripts.

    Writes segmentation (proposal) results under ``<model>_seg_results`` and
    end-to-end results under ``<model>_results``; when ``vis`` is set, debug
    images are additionally saved under ``<model>_visu`` / ``<model>_seg_visu``.

    NOTE(review): ``seg_predictions`` defaults to None but ``len()`` is taken
    unconditionally below — callers apparently always pass a list; confirm.
    """
    results_dir = os.path.join(output_folder, model_name + "_results")
    if not os.path.isdir(results_dir):
        os.mkdir(results_dir)
    seg_results_dir = os.path.join(output_folder, model_name + "_seg_results")
    if not os.path.isdir(seg_results_dir):
        os.mkdir(seg_results_dir)
    if vis:
        visu_dir = os.path.join(output_folder, model_name + "_visu")
        if not os.path.isdir(visu_dir):
            os.mkdir(visu_dir)
        seg_visu_dir = os.path.join(output_folder, model_name + "_seg_visu")
        if not os.path.isdir(seg_visu_dir):
            os.mkdir(seg_visu_dir)
    # ---- segmentation (proposal) stage outputs -------------------------------
    if len(seg_predictions) > 0:
        for seg_prediction in seg_predictions:
            image_paths, proposals, rotated_boxes, polygons, seg_maps, seg_scores = (
                seg_prediction[0],
                seg_prediction[1],
                seg_prediction[2],
                seg_prediction[3],
                seg_prediction[4],
                seg_prediction[5],
            )
            for batch_id in range(len(image_paths)):
                image_path = image_paths[batch_id]
                im_name = image_path.split("/")[-1]
                image = cv2.imread(image_path)
                height, width, _ = image.shape
                rotated_boxes_this_image = rotated_boxes[batch_id]
                polygons_this_image = polygons[batch_id]
                proposals_this_image = proposals[batch_id]
                seg_map = seg_maps[batch_id]
                seg_score = seg_scores[batch_id]
                height, width, _ = image.shape
                # scale factors from network resolution back to the image
                height_ratio = height / proposals_this_image.size[1]
                width_ratio = width / proposals_this_image.size[0]
                format_seg_output(seg_results_dir, rotated_boxes_this_image, polygons_this_image, seg_score, im_name, (height_ratio, width_ratio))
                if vis:
                    vis_seg_map(image_path, seg_map, rotated_boxes_this_image, polygons_this_image, proposals_this_image, seg_visu_dir)
    # ---- end-to-end recognition outputs (skipped for detection-only models) --
    if (not cfg.MODEL.TRAIN_DETECTION_ONLY):
        for image_path, prediction in predictions.items():
            im_name = image_path.split("/")[-1]
            if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
                global_prediction, char_mask, boxes_char, seq_words, seq_scores, detailed_seq_scores = (
                    prediction[0],
                    prediction[1],
                    prediction[2],
                    prediction[3],
                    prediction[4],
                    prediction[5],
                )
                if char_mask is not None:
                    # decode character masks into words and per-char scores
                    words, rec_scores, rec_char_scoress, char_polygons = process_char_mask(
                        char_mask, boxes_char
                    )
            else:
                global_prediction = prediction[0]
            test_image_width, test_image_height = global_prediction.size
            img = Image.open(image_path)
            width, height = img.size
            resize_ratio = float(height) / test_image_height
            # rescale predictions from test resolution to the original image
            global_prediction = global_prediction.resize((width, height))
            boxes = global_prediction.bbox.tolist()
            if cfg.MODEL.ROI_BOX_HEAD.INFERENCE_USE_BOX:
                scores = global_prediction.get_field("scores").tolist()
            if not cfg.MODEL.SEG.USE_SEG_POLY:
                masks = global_prediction.get_field("mask").cpu().numpy()
            else:
                masks = global_prediction.get_field("masks").get_polygons()
            result_logs = []
            polygons = []
            for k, box in enumerate(boxes):
                # skip degenerate (sub-pixel) boxes
                if box[2] - box[0] < 1 or box[3] - box[1] < 1:
                    continue
                box = list(map(int, box))
                if not cfg.MODEL.SEG.USE_SEG_POLY:
                    mask = masks[k, 0, :, :]
                    polygon = mask2polygon(
                        mask, box, img.size, threshold=0.5, output_folder=output_folder
                    )
                else:
                    polygon = list(masks[k].get_polygons()[0].cpu().numpy())
                    if not ("total_text" in output_folder or "cute80" in output_folder):
                        polygon = polygon2rbox(polygon, height, width)
                if polygon is None:
                    # NOTE(review): this fallback rectangle is assigned but the
                    # `continue` below discards the detection anyway, so the
                    # assignment is dead code — confirm whether the detection
                    # was meant to be kept with the box-shaped polygon.
                    polygon = [
                        box[0],
                        box[1],
                        box[2],
                        box[1],
                        box[2],
                        box[3],
                        box[0],
                        box[3],
                    ]
                    continue
                polygons.append(polygon)
                if cfg.MODEL.ROI_BOX_HEAD.INFERENCE_USE_BOX:
                    score = scores[k]
                else:
                    score = 1.0
                if cfg.MODEL.CHAR_MASK_ON or cfg.SEQUENCE.SEQ_ON:
                    if char_mask is None:
                        # placeholder values when no char-mask head output exists
                        word = 'aaa'
                        rec_score = 1.0
                        char_score = None
                    else:
                        word = words[k]
                        rec_score = rec_scores[k]
                        char_score = rec_char_scoress[k]
                    seq_word = seq_words[k]
                    seq_char_scores = seq_scores[k]
                    seq_score = sum(seq_char_scores) / float(len(seq_char_scores))
                    detailed_seq_score = detailed_seq_scores[k]
                    detailed_seq_score = np.squeeze(np.array(detailed_seq_score), axis=1)
                else:
                    # detection-only heads: fill recognition fields with dummies
                    word = 'aaa'
                    rec_score = 1.0
                    char_score = [1.0, 1.0, 1.0]
                    seq_word = 'aaa'
                    seq_char_scores = [1.0, 1.0, 1.0]
                    seq_score = 1.0
                    detailed_seq_score = None
                # flat result record consumed by format_output; total_text/cute80
                # additionally appends len(polygon) so the tail can be sliced off
                if "total_text" in output_folder or "cute80" in output_folder:
                    result_log = (
                        [int(x * 1.0) for x in box[:4]]
                        + polygon
                        + [word]
                        + [seq_word]
                        + [score]
                        + [rec_score]
                        + [seq_score]
                        + [char_score]
                        + [detailed_seq_score]
                        + [len(polygon)]
                    )
                else:
                    result_log = (
                        [int(x * 1.0) for x in box[:4]]
                        + polygon
                        + [word]
                        + [seq_word]
                        + [score]
                        + [rec_score]
                        + [seq_score]
                        + [char_score]
                        + [detailed_seq_score]
                    )
                result_logs.append(result_log)
            if vis:
                colors = creat_color_map(37, 255)
                if cfg.MODEL.CHAR_MASK_ON:
                    visualization(img, polygons, resize_ratio, colors, char_polygons, words)
                else:
                    visualization(img, polygons, resize_ratio, colors)
                img.save(os.path.join(visu_dir, im_name))
            format_output(results_dir, result_logs, im_name)
def inference(
    model,
    data_loader,
    iou_types=("bbox",),
    box_only=False,
    device="cuda",
    expected_results=(),
    expected_results_sigma_tol=4,
    output_folder=None,
    model_name=None,
    cfg=None,
):
    """Run the model over ``data_loader``, save the raw predictions, and write
    the evaluation result files via ``prepare_results_for_evaluation``.

    ``iou_types``, ``box_only``, ``expected_results`` and
    ``expected_results_sigma_tol`` are accepted for interface compatibility but
    are not used in this implementation.
    """
    # convert to a torch.device for efficiency
    model_name = model_name.split(".")[0] + "_" + str(cfg.INPUT.MIN_SIZE_TEST)
    predictions_path = os.path.join(output_folder, model_name + "_predictions.pth")
    seg_predictions_path = os.path.join(
        output_folder, model_name + "_seg_predictions.pth"
    )
    # if os.path.isfile(predictions_path) and os.path.isfile(seg_predictions_path):
    # NOTE: the cached-prediction shortcut is deliberately disabled (the
    # original condition is kept in the comment above); inference always runs.
    if False:
        predictions = torch.load(predictions_path)
        seg_predictions = torch.load(seg_predictions_path)
    else:
        device = torch.device(device)
        num_devices = (
            torch.distributed.get_world_size()
            if torch.distributed.is_initialized()
            else 1
        )
        logger = logging.getLogger("maskrcnn_benchmark.inference")
        dataset = data_loader.dataset
        logger.info("Start evaluation on {} images".format(len(dataset)))
        start_time = time.time()
        predictions, seg_predictions = compute_on_dataset(
            model, data_loader, device, cfg
        )
        # wait for all processes to complete before measuring the time
        synchronize()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=total_time))
        logger.info(
            "Total inference time: {} ({} s / img per device, on {} devices)".format(
                total_time_str, total_time * num_devices / len(dataset), num_devices
            )
        )
        # predictions = _accumulate_predictions_from_multiple_gpus(predictions)
        # if not is_main_process():
        #     return
        # persist raw predictions so result formatting can be re-run offline
        if output_folder:
            torch.save(predictions, predictions_path)
            torch.save(seg_predictions, seg_predictions_path)
    prepare_results_for_evaluation(
        predictions,
        output_folder,
        model_name,
        seg_predictions=seg_predictions,
        vis=cfg.TEST.VIS,
        cfg=cfg
    )
| 23,471 | 40.839572 | 164 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/engine/trainer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import time
import torch
from maskrcnn_benchmark.utils.comm import get_world_size, is_main_process
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
import torch.distributed as dist
from apex import amp
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # single process: nothing to reduce
        return loss_dict
    with torch.no_grad():
        keys = list(loss_dict.keys())
        stacked = torch.stack([loss_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            # only rank 0 holds the accumulated sum, so only it averages
            stacked /= world_size
        reduced = dict(zip(keys, stacked))
    return reduced
def do_train(
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    tb_logger,
    cfg,
    local_rank,
):
    """Main training loop: iterates ``data_loader`` once (iteration-based
    schedule), backpropagates with apex amp loss scaling, logs smoothed
    metrics, and periodically checkpoints on the rank-0 process.

    ``arguments["iteration"]`` is both the resume point and is kept up to date
    so it is saved into every checkpoint.
    """
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    start_training_time = time.time()
    end = time.time()
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        data_time = time.time() - end
        arguments["iteration"] = iteration
        # scheduler stepped per iteration, before the optimizer (pre-1.1
        # PyTorch ordering — presumably intentional for this codebase)
        scheduler.step()
        images = images.to(device)
        targets = [target.to(device) for target in targets]
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)
        optimizer.zero_grad()
        # losses.backward()
        # Note: If mixed precision is not used, this ends up doing nothing
        # Otherwise apply loss scaling for mixed-precision recipe
        with amp.scale_loss(losses, optimizer) as scaled_losses:
            scaled_losses.backward()
        if cfg.SOLVER.USE_ADAM:
            # gradient clipping only when training with Adam
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()
        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)
        # ETA from the global average iteration time
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if local_rank == 0 and (iteration % cfg.SOLVER.DISPLAY_FREQ == 0 or iteration == (max_iter - 1)):
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
            # mirror the reduced losses into TensorBoard
            for tag, value in loss_dict_reduced.items():
                tb_logger.scalar_summary(tag, value.item(), iteration)
        if local_rank == 0 and iteration % checkpoint_period == 0 and iteration > 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
    # final checkpoint (may duplicate the last periodic one)
    if local_rank == 0:
        checkpointer.save("model_{:07d}".format(iteration), **arguments)
    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
| 4,397 | 34.184 | 105 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/c2_model_loading.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import pickle
from collections import OrderedDict
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
def _rename_basic_resnet_weights(layer_keys):
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [k.replace(".w", ".weight") for k in layer_keys]
layer_keys = [k.replace(".bn", "_bn") for k in layer_keys]
layer_keys = [k.replace(".b", ".bias") for k in layer_keys]
layer_keys = [k.replace("_bn.s", "_bn.scale") for k in layer_keys]
layer_keys = [k.replace(".biasranch", ".branch") for k in layer_keys]
layer_keys = [k.replace("bbox.pred", "bbox_pred") for k in layer_keys]
layer_keys = [k.replace("cls.score", "cls_score") for k in layer_keys]
layer_keys = [k.replace("res.conv1_", "conv1_") for k in layer_keys]
# RPN / Faster RCNN
layer_keys = [k.replace(".biasbox", ".bbox") for k in layer_keys]
layer_keys = [k.replace("conv.rpn", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox.pred", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [k.replace("rpn.cls.logits", "rpn.cls_logits") for k in layer_keys]
# Affine-Channel -> BatchNorm enaming
layer_keys = [k.replace("_bn.scale", "_bn.weight") for k in layer_keys]
# Make torchvision-compatible
layer_keys = [k.replace("conv1_bn.", "bn1.") for k in layer_keys]
layer_keys = [k.replace("res2.", "layer1.") for k in layer_keys]
layer_keys = [k.replace("res3.", "layer2.") for k in layer_keys]
layer_keys = [k.replace("res4.", "layer3.") for k in layer_keys]
layer_keys = [k.replace("res5.", "layer4.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2a_bn.", ".bn1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2b_bn.", ".bn2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
layer_keys = [k.replace(".branch2c_bn.", ".bn3.") for k in layer_keys]
layer_keys = [k.replace(".branch1.", ".downsample.0.") for k in layer_keys]
layer_keys = [k.replace(".branch1_bn.", ".downsample.1.") for k in layer_keys]
return layer_keys
def _rename_fpn_weights(layer_keys, stage_names):
for mapped_idx, stage_name in enumerate(stage_names, 1):
suffix = ""
if mapped_idx < 4:
suffix = ".lateral"
layer_keys = [
k.replace("fpn.inner.layer{}.sum{}".format(stage_name, suffix), "fpn_inner{}".format(mapped_idx)) for k in layer_keys
]
layer_keys = [k.replace("fpn.layer{}.sum".format(stage_name), "fpn_layer{}".format(mapped_idx)) for k in layer_keys]
layer_keys = [k.replace("rpn.conv.fpn2", "rpn.conv") for k in layer_keys]
layer_keys = [k.replace("rpn.bbox_pred.fpn2", "rpn.bbox_pred") for k in layer_keys]
layer_keys = [
k.replace("rpn.cls_logits.fpn2", "rpn.cls_logits") for k in layer_keys
]
return layer_keys
def _rename_weights_for_resnet(weights, stage_names):
    """Convert a Caffe2 ResNet/FPN blob dict into an OrderedDict keyed by
    torchvision-style names, with values converted to torch tensors.

    ``_momentum`` blobs are skipped; every kept mapping is logged.
    """
    original_keys = sorted(weights.keys())
    mapped = sorted(weights.keys())
    # for X-101, rename output to fc1000 to avoid conflicts afterwards
    mapped = ["fc1000_b" if k == "pred_b" else k for k in mapped]
    mapped = ["fc1000_w" if k == "pred_w" else k for k in mapped]
    # performs basic renaming: _ -> . , etc
    mapped = _rename_basic_resnet_weights(mapped)
    # FPN
    mapped = _rename_fpn_weights(mapped, stage_names)
    # Mask R-CNN / Keypoint R-CNN heads, then our RPN structure (order matters:
    # "kps.score.lowres" must be handled before the shorter "kps.score")
    head_renames = [
        ("mask.fcn.logits", "mask_fcn_logits"),
        (".[mask].fcn", "mask_fcn"),
        ("conv5.mask", "conv5_mask"),
        ("kps.score.lowres", "kps_score_lowres"),
        ("kps.score", "kps_score"),
        ("conv.fcn", "conv_fcn"),
        ("rpn.", "rpn.head."),
    ]
    for old, new in head_renames:
        mapped = [k.replace(old, new) for k in mapped]
    key_map = dict(zip(original_keys, mapped))

    logger = logging.getLogger(__name__)
    logger.info("Remapping C2 weights")
    max_c2_key_size = max(len(k) for k in original_keys if "_momentum" not in k)

    new_weights = OrderedDict()
    for old_key in original_keys:
        if "_momentum" in old_key:
            continue
        # if 'fc1000' in old_key:
        #     continue
        logger.info("C2 name: {: <{}} mapped name: {}".format(old_key, max_c2_key_size, key_map[old_key]))
        new_weights[key_map[old_key]] = torch.from_numpy(weights[old_key])
    return new_weights
def _load_c2_pickled_weights(file_path):
with open(file_path, "rb") as f:
data = pickle.load(f, encoding="latin1")
if "blobs" in data:
weights = data["blobs"]
else:
weights = data
return weights
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
logger = logging.getLogger(__name__)
logger.info("Remapping conv weights for deformable conv weights")
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*layer{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
if old_key.find(param) is -1:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
logger.info("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
state_dict[new_key] = state_dict[old_key]
del state_dict[old_key]
return state_dict
# Per-architecture stage suffixes ("<stage>.<last block index>") as they appear
# in Caffe2 FPN blob names; consumed by _rename_fpn_weights to locate the
# "fpn.inner.layer{stage}.sum" / "fpn.layer{stage}.sum" blobs.
_C2_STAGE_NAMES = {
    "R-50": ["1.2", "2.3", "3.5", "4.2"],
    "R-101": ["1.2", "2.3", "3.22", "4.2"],
}
def load_c2_format(cfg, f):
    """Load a Caffe2 pickle checkpoint and return it as ``{"model": weights}``
    with all blob names remapped to this codebase's conventions."""
    # TODO make it support other architectures
    c2_weights = _load_c2_pickled_weights(f)
    arch = cfg.MODEL.BACKBONE.CONV_BODY.replace("-C4", "").replace("-FPN", "")
    renamed = _rename_weights_for_resnet(c2_weights, _C2_STAGE_NAMES[arch])
    # remap conv2 weights for any stage configured with deformable convolutions
    renamed = _rename_conv_weights_for_deformable_conv_layers(renamed, cfg)
    return dict(model=renamed)
| 6,944 | 39.144509 | 129 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/metric_logger.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import defaultdict
from collections import deque
import torch
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)
        self.series = []
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record a new observation."""
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value

    @property
    def median(self):
        """Median over the most recent window of values."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean over the most recent window of values."""
        return torch.tensor(list(self.deque)).mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count
class MetricLogger(object):
    """Collects named metrics into SmoothedValue meters and renders them as a
    single delimited string for log lines.

    Meters are also exposed as attributes (``logger.loss`` returns the
    ``SmoothedValue`` registered under "loss").
    """

    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar observation per keyword argument.

        Tensors are converted via ``.item()``; values must be numeric.
        """
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails: expose meters by name.
        if attr in self.meters:
            return self.meters[attr]
        # BUG FIX: the original called object.__getattr__(self, attr), but
        # `object` has no __getattr__, which produced a confusing secondary
        # AttributeError; raise the conventional error explicitly instead.
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, attr)
        )

    def __str__(self):
        """Render every meter as "name: median (global_avg)"."""
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
            )
        return self.delimiter.join(loss_str)
| 1,714 | 25.796875 | 82 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/checkpoint.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from maskrcnn_benchmark.utils.c2_model_loading import load_c2_format
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.model_zoo import cache_url
class Checkpointer(object):
    """Persist and restore training state (model, optimizer, scheduler).

    A ``last_checkpoint`` marker file inside ``save_dir`` records the most
    recently written checkpoint so training can resume automatically.
    """

    def __init__(
        self,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.save_dir = save_dir
        self.save_to_disk = save_to_disk
        self.logger = logging.getLogger(__name__) if logger is None else logger

    def save(self, name, **kwargs):
        """Write ``<save_dir>/<name>.pth`` and update the marker file.

        Extra keyword arguments (e.g. the current iteration) are stored in the
        checkpoint alongside the state dicts. No-op unless both ``save_dir``
        and ``save_to_disk`` are set.
        """
        if not self.save_dir:
            return
        if not self.save_to_disk:
            return
        data = {"model": self.model.state_dict()}
        if self.optimizer is not None:
            data["optimizer"] = self.optimizer.state_dict()
        if self.scheduler is not None:
            data["scheduler"] = self.scheduler.state_dict()
        data.update(kwargs)
        save_file = os.path.join(self.save_dir, "{}.pth".format(name))
        self.logger.info("Saving checkpoint to {}".format(save_file))
        torch.save(data, save_file)
        self.tag_last_checkpoint(save_file)

    def load(self, f=None, resume=False):
        """Load a checkpoint into the model and, when ``resume`` is True, into
        the optimizer and scheduler.

        Returns the leftover checkpoint entries (e.g. iteration counters), or
        an empty dict when nothing could be loaded.
        """
        if self.has_checkpoint():
            # a recorded last checkpoint wins over the explicitly passed path
            f = self.get_checkpoint_file()
        if not f:
            self.logger.info("No checkpoint found. Initializing model from scratch")
            return {}
        self.logger.info("Loading checkpoint from {}".format(f))
        checkpoint = self._load_file(f)
        self._load_model(checkpoint)
        if resume:
            if "optimizer" in checkpoint and self.optimizer:
                self.logger.info("Loading optimizer from {}".format(f))
                self.optimizer.load_state_dict(checkpoint.pop("optimizer"))
            if "scheduler" in checkpoint and self.scheduler:
                self.logger.info("Loading scheduler from {}".format(f))
                self.scheduler.load_state_dict(checkpoint.pop("scheduler"))
        return checkpoint

    def has_checkpoint(self):
        """True when a ``last_checkpoint`` marker exists in ``save_dir``."""
        return os.path.exists(os.path.join(self.save_dir, "last_checkpoint"))

    def get_checkpoint_file(self):
        """Return the path recorded in the marker file, or "" if unreadable."""
        marker = os.path.join(self.save_dir, "last_checkpoint")
        try:
            with open(marker, "r") as f:
                return f.read()
        except IOError:
            # the marker may have just been deleted by a separate process
            return ""

    def tag_last_checkpoint(self, last_filename):
        """Record ``last_filename`` as the most recent checkpoint."""
        with open(os.path.join(self.save_dir, "last_checkpoint"), "w") as f:
            f.write(last_filename)

    def _load_file(self, f):
        # always deserialize onto the CPU so GPU-saved checkpoints load anywhere
        return torch.load(f, map_location=torch.device("cpu"))

    def _load_model(self, checkpoint):
        load_state_dict(self.model, checkpoint.pop("model"))
class DetectronCheckpointer(Checkpointer):
    """Checkpointer that also understands Detectron-style sources: catalog://
    shorthands, http(s) URLs, and Caffe2 ``.pkl`` checkpoints."""

    def __init__(
        self,
        cfg,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        super(DetectronCheckpointer, self).__init__(
            model, optimizer, scheduler, save_dir, save_to_disk, logger
        )
        self.cfg = cfg.clone()

    def _load_file(self, f):
        if f.startswith("catalog://"):
            # resolve the catalog:// shorthand through the paths catalog
            paths_catalog = import_file(
                "maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True
            )
            resolved = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
            self.logger.info("{} points to {}".format(f, resolved))
            f = resolved
        if f.startswith("http"):
            # download remote checkpoints once and reuse the cached copy
            cached = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached))
            f = cached
        if f.endswith(".pkl"):
            # Caffe2 pickle checkpoints need blob-name remapping
            return load_c2_format(self.cfg, f)
        # native torch checkpoint; normalize to a {"model": ...} dict
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        if "model" not in loaded:
            loaded = dict(model=loaded)
        return loaded
| 4,813 | 33.385714 | 87 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/comm.py | # # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# """
# This file contains primitives for multi-gpu communication.
# This is useful when doing distributed training.
# """
# import os
# import pickle
# import tempfile
# import time
# import torch
# import torch.distributed as dist
# # def get_world_size():
# # if not dist.is_initialized():
# # return 1
# # return dist.get_world_size()
# #
# #
# # def is_main_process():
# # if not dist.is_initialized():
# # return True
# # return dist.get_rank() == 0
# #
# # def get_rank():
# # if not dist.is_initialized():
# # return 0
# # return dist.get_rank()
# #
# # def synchronize():
# # """
# # Helper function to synchronize between multiple processes when
# # using distributed training
# # """
# # if not dist.is_initialized():
# # return
# # world_size = dist.get_world_size()
# # rank = dist.get_rank()
# # if world_size == 1:
# # return
# #
# # def _send_and_wait(r):
# # if rank == r:
# # tensor = torch.tensor(0, device="cuda")
# # else:
# # tensor = torch.tensor(1, device="cuda")
# # dist.broadcast(tensor, r)
# # while tensor.item() == 1:
# # time.sleep(1)
# #
# # _send_and_wait(0)
# # # now sync on the main process
# # _send_and_wait(1)
# #
# #
# def _encode(encoded_data, data):
# # gets a byte representation for the data
# encoded_bytes = pickle.dumps(data)
# # convert this byte string into a byte tensor
# storage = torch.ByteStorage.from_buffer(encoded_bytes)
# tensor = torch.ByteTensor(storage).to("cuda")
# # encoding: first byte is the size and then rest is the data
# s = tensor.numel()
# assert s <= 255, "Can't encode data greater than 255 bytes"
# # put the encoded data in encoded_data
# encoded_data[0] = s
# encoded_data[1 : (s + 1)] = tensor
# def _decode(encoded_data):
# size = encoded_data[0]
# encoded_tensor = encoded_data[1 : (size + 1)].to("cpu")
# return pickle.loads(bytearray(encoded_tensor.tolist()))
# # TODO try to use tensor in shared-memory instead of serializing to disk
# # this involves getting the all_gather to work
# def scatter_gather(data):
# """
# This function gathers data from multiple processes, and returns them
# in a list, as they were obtained from each process.
# This function is useful for retrieving data from multiple processes,
# when launching the code with torch.distributed.launch
# Note: this function is slow and should not be used in tight loops, i.e.,
# do not use it in the training loop.
# Arguments:
# data: the object to be gathered from multiple processes.
# It must be serializable
# Returns:
# result (list): a list with as many elements as there are processes,
# where each element i in the list corresponds to the data that was
# gathered from the process of rank i.
# """
# # strategy: the main process creates a temporary directory, and communicates
# # the location of the temporary directory to all other processes.
# # each process will then serialize the data to the folder defined by
# # the main process, and then the main process reads all of the serialized
# # files and returns them in a list
# if not dist.is_initialized():
# return [data]
# synchronize()
# # get rank of the current process
# rank = dist.get_rank()
# # the data to communicate should be small
# data_to_communicate = torch.empty(256, dtype=torch.uint8, device="cuda")
# if rank == 0:
# # manually creates a temporary directory, that needs to be cleaned
# # afterwards
# tmp_dir = tempfile.mkdtemp()
# _encode(data_to_communicate, tmp_dir)
# synchronize()
# # the main process (rank=0) communicates the data to all processes
# dist.broadcast(data_to_communicate, 0)
# # get the data that was communicated
# tmp_dir = _decode(data_to_communicate)
# # each process serializes to a different file
# file_template = "file{}.pth"
# tmp_file = os.path.join(tmp_dir, file_template.format(rank))
# torch.save(data, tmp_file)
# # synchronize before loading the data
# synchronize()
# # only the master process returns the data
# if rank == 0:
# data_list = []
# world_size = dist.get_world_size()
# for r in range(world_size):
# file_path = os.path.join(tmp_dir, file_template.format(r))
# d = torch.load(file_path)
# data_list.append(d)
# # cleanup
# os.remove(file_path)
# # cleanup
# os.rmdir(tmp_dir)
# return data_list
# def get_world_size():
# if not dist.is_available():
# print('distributed is not available')
# return 1
# if not dist.is_initialized():
# print('distributed is not initialized')
# return 1
# return dist.get_world_size()
# def get_rank():
# if not dist.is_available():
# return 0
# if not dist.is_initialized():
# return 0
# return dist.get_rank()
# def is_main_process():
# return get_rank() == 0
# def synchronize():
# """
# Helper function to synchronize (barrier) among all processes when
# using distributed training
# """
# if not dist.is_available():
# return
# if not dist.is_initialized():
# return
# world_size = dist.get_world_size()
# if world_size == 1:
# return
# dist.barrier()
# def all_gather(data):
# """
# Run all_gather on arbitrary picklable data (not necessarily tensors)
# Args:
# data: any picklable object
# Returns:
# list[data]: list of data gathered from each rank
# """
# world_size = get_world_size()
# if world_size == 1:
# return [data]
# # serialized to a Tensor
# buffer = pickle.dumps(data)
# storage = torch.ByteStorage.from_buffer(buffer)
# tensor = torch.ByteTensor(storage).to("cuda")
# # obtain Tensor size of each rank
# local_size = torch.IntTensor([tensor.numel()]).to("cuda")
# size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
# dist.all_gather(size_list, local_size)
# size_list = [int(size.item()) for size in size_list]
# max_size = max(size_list)
# # receiving Tensor from all ranks
# # we pad the tensor because torch all_gather does not support
# # gathering tensors of different shapes
# tensor_list = []
# for _ in size_list:
# tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
# if local_size != max_size:
# padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
# tensor = torch.cat((tensor, padding), dim=0)
# dist.all_gather(tensor_list, tensor)
# data_list = []
# for size, tensor in zip(size_list, tensor_list):
# buffer = tensor.cpu().numpy().tobytes()[:size]
# data_list.append(pickle.loads(buffer))
# return data_list
# def reduce_dict(input_dict, average=True):
# """
# Args:
# input_dict (dict): all the values will be reduced
# average (bool): whether to do average or sum
# Reduce the values in the dictionary from all processes so that process with rank
# 0 has the averaged results. Returns a dict with the same fields as
# input_dict, after reduction.
# """
# world_size = get_world_size()
# if world_size < 2:
# return input_dict
# with torch.no_grad():
# names = []
# values = []
# # sort the keys so that they are consistent across processes
# for k in sorted(input_dict.keys()):
# names.append(k)
# values.append(input_dict[k])
# values = torch.stack(values, dim=0)
# dist.reduce(values, dst=0)
# if dist.get_rank() == 0 and average:
# # only main process gets accumulated, so only divide by
# # world_size in this case
# values /= world_size
# reduced_dict = {k: v for k, v in zip(names, values)}
# return reduced_dict
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import time
import torch
import torch.distributed as dist
def get_world_size():
    """Return the number of distributed workers, or 1 when not distributed."""
    if not dist.is_available() or not dist.is_initialized():
        return 1
    return dist.get_world_size()
def get_rank():
    """Return this process's distributed rank, or 0 when not distributed."""
    if not dist.is_available() or not dist.is_initialized():
        return 0
    return dist.get_rank()
def is_main_process():
    """Whether this process is the main (rank-0) worker."""
    rank = get_rank()
    return rank == 0
def synchronize():
    """Barrier across all distributed workers.

    No-op when torch.distributed is unavailable, uninitialized, or the job
    runs with a single process.
    """
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()
def scatter_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank

    NOTE: payloads are exchanged via pickle between training workers only;
    do not feed untrusted data through this path (pickle.loads executes
    arbitrary code).
    """
    world_size = get_world_size()
    if world_size == 1:
        # single process: nothing to gather
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        # pad this rank's payload up to the common buffer size
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # strip each rank's padding before unpickling
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Reduce the values of ``input_dict`` across all processes so that rank 0
    ends up with the summed (or averaged) result.

    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum

    Returns a dict with the same fields as ``input_dict``, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # single process: nothing to reduce
        return input_dict
    with torch.no_grad():
        # sort keys so every process stacks values in the same order
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0 and average:
            # only rank 0 holds the accumulated sum, so only it divides
            stacked /= world_size
        return dict(zip(keys, stacked))
| 11,687 | 29.83905 | 86 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/model_zoo.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import sys
try:
from torch.hub import _download_url_to_file
from torch.hub import urlparse
from torch.hub import HASH_REGEX
except ImportError:
from torch.utils.model_zoo import _download_url_to_file
from torch.utils.model_zoo import urlparse
from torch.utils.model_zoo import HASH_REGEX
from maskrcnn_benchmark.utils.comm import is_main_process
from maskrcnn_benchmark.utils.comm import synchronize
# very similar to https://github.com/pytorch/pytorch/blob/master/torch/utils/model_zoo.py
# but with a few improvements and modifications
def cache_url(url, model_dir=None, progress=True):
    r"""Loads the Torch serialized object at the given URL.
    If the object is already present in `model_dir`, it's deserialized and
    returned. The filename part of the URL should follow the naming convention
    ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
    digits of the SHA256 hash of the contents of the file. The hash is used to
    ensure unique names and to verify the contents of the file.
    The default value of `model_dir` is ``$TORCH_HOME/models`` where
    ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
    overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        progress (bool, optional): whether or not to display a progress bar to stderr
    Example:
        >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    """
    if model_dir is None:
        torch_home = os.path.expanduser(os.getenv("TORCH_HOME", "~/.torch"))
        model_dir = os.getenv("TORCH_MODEL_ZOO", os.path.join(torch_home, "models"))
    # exist_ok avoids a check-then-create race: several distributed workers
    # can reach this point at the same time (only rank 0 downloads below,
    # but every rank creates the directory).
    os.makedirs(model_dir, exist_ok=True)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if filename == "model_final.pkl":
        # workaround as pre-trained Caffe2 models from Detectron have all the same filename
        # so make the full path the filename by replacing / with _
        filename = parts.path.replace("/", "_")
    cached_file = os.path.join(model_dir, filename)
    # only the main process downloads; the others wait at synchronize()
    if not os.path.exists(cached_file) and is_main_process():
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = HASH_REGEX.search(filename)
        if hash_prefix is not None:
            hash_prefix = hash_prefix.group(1)
            # workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
            # which matches the hash PyTorch uses. So we skip the hash matching
            # if the hash_prefix is less than 6 characters
            if len(hash_prefix) < 6:
                hash_prefix = None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    synchronize()
    return cached_file
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/collect_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import PIL
from torch.utils.collect_env import get_pretty_env_info
def get_pil_version():
    """Format the installed Pillow version for appending to the env report."""
    version = PIL.__version__
    return "\n Pillow ({})".format(version)
def collect_env_info():
    """Return torch's pretty environment report with the Pillow version appended."""
    return get_pretty_env_info() + get_pil_version()
| 338 | 21.6 | 71 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/utils/model_serialization.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from collections import OrderedDict
import logging
import torch
from maskrcnn_benchmark.utils.imports import import_file
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
    """
    Copy loaded weights into ``model_state_dict`` (in place) by suffix matching.

    Model keys often carry extra prefixes (e.g. ``backbone[0].body.res2.conv1.weight``)
    that a pre-trained checkpoint (``res2.conv1.weight``) does not. For every
    model key we look for a loaded key that is a suffix of it; when several
    loaded keys match, the longest one wins (so ``res2.conv1.weight`` is
    preferred over a bare ``conv1.weight``). Each match is logged.
    """
    current_keys = sorted(model_state_dict.keys())
    loaded_keys = sorted(loaded_state_dict.keys())
    # (i, j) entry is len(loaded_keys[j]) when current_keys[i] ends with it, else 0
    scores = [
        len(candidate) if key.endswith(candidate) else 0
        for key in current_keys
        for candidate in loaded_keys
    ]
    match_matrix = torch.as_tensor(scores).view(len(current_keys), len(loaded_keys))
    max_match_size, idxs = match_matrix.max(1)
    # mark model keys with no matching loaded key
    idxs[max_match_size == 0] = -1
    # column widths for aligned log output
    max_size = max([len(key) for key in current_keys]) if current_keys else 1
    max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1
    log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
    logger = logging.getLogger(__name__)
    for idx_new, idx_old in enumerate(idxs.tolist()):
        if idx_old == -1:
            continue
        key = current_keys[idx_new]
        key_old = loaded_keys[idx_old]
        model_state_dict[key] = loaded_state_dict[key_old]
        logger.info(
            log_str_template.format(
                key,
                max_size,
                key_old,
                max_size_loaded,
                tuple(loaded_state_dict[key_old].shape),
            )
        )
def strip_prefix_if_present(state_dict, prefix):
    """Return a copy of ``state_dict`` with ``prefix`` stripped from the front
    of every key, insertion order preserved.

    If any key does not start with ``prefix``, the input is returned unchanged.

    Bug fix: the original used ``key.replace(prefix, "")``, which removed the
    prefix *everywhere* it occurred in the key, so e.g.
    ``"module.backbone.module.x"`` became ``"backbone.x"`` instead of
    ``"backbone.module.x"``. Only the leading occurrence is stripped now.
    """
    keys = sorted(state_dict.keys())
    if not all(key.startswith(prefix) for key in keys):
        return state_dict
    stripped_state_dict = OrderedDict()
    for key, value in state_dict.items():
        stripped_state_dict[key[len(prefix):]] = value
    return stripped_state_dict
def load_state_dict(model, loaded_state_dict):
    """Load ``loaded_state_dict`` into ``model`` using suffix-based key matching."""
    model_state_dict = model.state_dict()
    # checkpoints saved from a DataParallel/DistributedDataParallel wrapper
    # prefix every key with "module."; drop it before matching
    cleaned = strip_prefix_if_present(loaded_state_dict, prefix="module.")
    align_and_update_state_dicts(model_state_dict, cleaned)
    # strict loading: model_state_dict was updated in place, so it covers
    # every parameter the model expects
    model.load_state_dict(model_state_dict)
| 3,464 | 41.777778 | 91 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/build.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import logging
import torch.utils.data
from maskrcnn_benchmark.utils.comm import get_world_size
from maskrcnn_benchmark.utils.imports import import_file
from . import datasets as D
from . import samplers
from .collate_batch import BatchCollator
from .transforms import build_transforms
def build_dataset(cfg, dataset_list, transforms, dataset_catalog, is_train=True):
    """
    Instantiate the datasets named in ``dataset_list``.

    Arguments:
        cfg: the global config node
        dataset_list (list[str]): names of the datasets, e.g.
            coco_2014_train, coco_2014_val, etc
        transforms (callable): transforms to apply to each (image, target) sample
        dataset_catalog (DatasetCatalog): contains the information on how to
            construct a dataset.
        is_train (bool): whether to setup the dataset for training or testing

    Returns a list of datasets (one per name when testing; a single merged
    dataset when training).
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError(
            "dataset_list should be a list of strings, got {}".format(dataset_list))
    datasets = []
    for dataset_name in dataset_list:
        data = dataset_catalog.get(dataset_name)
        factory = getattr(D, data["factory"])
        args = data["args"]
        if data["factory"] == "COCODataset":
            # drop images without annotations, but only while training
            args["remove_images_without_annotations"] = is_train
        args["transforms"] = transforms
        args["ignore_difficult"] = cfg.DATASETS.IGNORE_DIFFICULT
        datasets.append(factory(**args))
    if not is_train:
        # testing: keep the datasets separate
        return datasets
    # training: merge everything into one dataset, sampled per cfg ratios
    if len(datasets) > 1:
        return [D.MixDataset(datasets, cfg.DATASETS.RATIOS)]
    return [datasets[0]]
def make_data_sampler(dataset, shuffle, distributed):
    """Pick the sampler matching the run configuration (distributed/shuffled)."""
    if distributed:
        return samplers.DistributedSampler(dataset, shuffle=shuffle)
    if shuffle:
        return torch.utils.data.sampler.RandomSampler(dataset)
    return torch.utils.data.sampler.SequentialSampler(dataset)
def _quantize(x, bins):
bins = sorted(bins.copy())
quantized = list(map(lambda y: bisect.bisect_right(bins, y), x))
return quantized
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
img_info = dataset.get_img_info(i)
aspect_ratio = float(img_info["height"]) / float(img_info["width"])
aspect_ratios.append(aspect_ratio)
return aspect_ratios
def make_batch_data_sampler(
    dataset, sampler, aspect_grouping, images_per_batch, num_iters=None, start_iter=0
):
    """Wrap ``sampler`` into a batch sampler.

    When ``aspect_grouping`` is truthy, images are batched with others of a
    similar aspect ratio; when ``num_iters`` is given, the batch sampler is
    further wrapped to yield exactly that many iterations starting at
    ``start_iter``.
    """
    if aspect_grouping:
        if not isinstance(aspect_grouping, (list, tuple)):
            aspect_grouping = [aspect_grouping]
        # bucket images by quantized aspect ratio so batches stay homogeneous
        group_ids = _quantize(_compute_aspect_ratios(dataset), aspect_grouping)
        batch_sampler = samplers.GroupedBatchSampler(
            sampler, group_ids, images_per_batch, drop_uneven=False
        )
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_batch, drop_last=False
        )
    if num_iters is not None:
        batch_sampler = samplers.IterationBasedBatchSampler(
            batch_sampler, num_iters, start_iter
        )
    return batch_sampler
def make_data_loader(cfg, is_train=True, is_distributed=False, start_iter=0):
    """Build the training or testing data loader(s) from the config.

    Returns a single DataLoader when training (datasets are merged) and a list
    of DataLoaders (one per test dataset) otherwise.

    Bug fix: the divisibility assert messages were accidentally split across
    two statements, so ``"of GPUs ({}) used.".format(...)`` was a free-standing
    no-op expression and never part of the assertion message. The message is
    now a single parenthesized expression attached to the assert.
    """
    num_gpus = get_world_size()
    if is_train:
        images_per_batch = cfg.SOLVER.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, (
            "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(images_per_batch, num_gpus)
        )
        images_per_gpu = images_per_batch // num_gpus
        shuffle = True
        num_iters = cfg.SOLVER.MAX_ITER
    else:
        images_per_batch = cfg.TEST.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, (
            "TEST.IMS_PER_BATCH ({}) must be divisible by the number "
            "of GPUs ({}) used.".format(images_per_batch, num_gpus)
        )
        images_per_gpu = images_per_batch // num_gpus
        # distributed evaluation shuffles so each rank sees a spread of sizes;
        # single-process evaluation keeps deterministic order
        shuffle = is_distributed
        num_iters = None
        start_iter = 0
    if images_per_gpu > 1:
        logger = logging.getLogger(__name__)
        logger.warning(
            "When using more than one image per GPU you may encounter "
            "an out-of-memory (OOM) error if your GPU does not have "
            "sufficient memory. If this happens, you can reduce "
            "SOLVER.IMS_PER_BATCH (for training) or "
            "TEST.IMS_PER_BATCH (for inference). For training, you must "
            "also adjust the learning rate and schedule length according "
            "to the linear scaling rule. See for example: "
            "https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14"
        )
    # group images which have similar aspect ratio. In this case, we only
    # group in two cases: those with width / height > 1, and the other way around,
    # but the code supports more general grouping strategy
    aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []
    paths_catalog = import_file(
        "maskrcnn_benchmark.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = paths_catalog.DatasetCatalog
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train)
    data_loaders = []
    for dataset in datasets:
        sampler = make_data_sampler(dataset, shuffle, is_distributed)
        batch_sampler = make_batch_data_sampler(
            dataset, sampler, aspect_grouping, images_per_gpu, num_iters, start_iter
        )
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
        )
        data_loaders.append(data_loader)
    if is_train:
        # during training, a single (possibly concatenated) data_loader is returned
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
| 6,740 | 37.301136 | 143 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/tdtr.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
CharPolygons,
SegmentationCharMask,
SegmentationMask,
)
from PIL import Image, ImageDraw
class Tdtr(object):
    """Text-detection/recognition dataset.

    Reads images from ``imgs_dir`` and, when ``gts_dir`` is given, one
    ground-truth ``.txt`` file per image (comma-separated coordinates with the
    last field being the word label). Yields ``(image, BoxList target, path)``
    triples; the target carries word boxes, polygon masks, per-box labels and
    (optionally empty) character-box annotations.

    NOTE(review): in this annotation format a word label of "1" appears to
    mark ignored/difficult regions — confirm against the data.
    """

    def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        # whether character-level annotations should be attached when available
        self.use_charann = use_charann
        self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)]
        self.gts_dir = gts_dir
        self.transforms = transforms
        # boxes smaller than this on either side are treated as invalid proposals
        self.min_proposal_size = 2
        # index 0 ('_') acts as the background/blank character class
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        # enable to dump debug visualizations to ./vis/ from __getitem__
        self.vis = False
        self.ignore_difficult = ignore_difficult
        # only filter the training split: drop images with no positive boxes
        if self.ignore_difficult and (self.gts_dir is not None) and 'train' in self.gts_dir:
            self.image_lists = self.filter_image_lists()

    def filter_image_lists(self):
        """Return only the image paths whose ground truth contains at least one
        box not labeled "1" (i.e. at least one positive instance)."""
        new_image_lists = []
        for img_path in self.image_lists:
            has_positive = False
            im_name = os.path.basename(img_path)
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            if not os.path.isfile(gt_path):
                # fall back to the "gt_<stem>.txt" naming convention
                gt_path = os.path.join(
                    self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
                )
            lines = open(gt_path, 'r').readlines()
            for line in lines:
                charbbs = []
                strs, loc = self.line2boxes(line)
                word = strs[0]
                if word == "1":
                    continue
                else:
                    has_positive = True
            if has_positive:
                new_image_lists.append(img_path)
        return new_image_lists

    def __getitem__(self, item):
        """Return ``(image, target, image_path)`` for index ``item``.

        ``target`` is a BoxList with "labels", "masks" and "char_masks" fields,
        or None when the dataset has no ground-truth directory.
        """
        im_name = os.path.basename(self.image_lists[item])
        # print(self.image_lists[item])
        img = Image.open(self.image_lists[item]).convert("RGB")
        width, height = img.size
        if self.gts_dir is not None:
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
                gt_path, height, width
            )
            # an empty first word signals that no real annotations were found
            if words[0] == "":
                use_char_ann = False
            else:
                use_char_ann = True
            if not self.use_charann:
                use_char_ann = False
            target = BoxList(
                boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann
            )
            if self.ignore_difficult:
                # keep per-box labels (-1 marks ignored boxes)
                labels = torch.from_numpy(np.array(labels))
            else:
                labels = torch.ones(len(boxes))
            target.add_field("labels", labels)
            masks = SegmentationMask(segmentations, img.size)
            target.add_field("masks", masks)
            char_masks = SegmentationCharMask(
                charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes)
            )
            target.add_field("char_masks", char_masks)
        else:
            target = None
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            # debug visualization: undo the mean subtraction, overlay masks
            # (and char masks when available) plus the boxes, save to ./vis/
            new_im = img.numpy().copy().transpose([1, 2, 0]) + [
                102.9801,
                115.9465,
                122.7717,
            ]
            new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
            mask = target.extra_fields["masks"].polygons[0].convert("mask")
            mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
            if self.use_charann:
                m, _ = (
                    target.extra_fields["char_masks"]
                    .chars_boxes[0]
                    .convert("char_mask")
                )
                color = self.creat_color_map(37, 255)
                color_map = color[m.numpy().astype(np.uint8)]
                char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
                char = Image.blend(char, new_im, 0.5)
            else:
                char = new_im
            new = Image.blend(char, mask, 0.5)
            img_draw = ImageDraw.Draw(new)
            for box in target.bbox.numpy():
                # expand the xyxy box to its 4 corners and close the polygon
                box = list(box)
                box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
                img_draw.line(box, fill=(255, 0, 0), width=2)
            new.save("./vis/char_" + im_name)
        return img, target, self.image_lists[item]

    def creat_color_map(self, n_class, width):
        """Build an RGB color table with at least ``n_class`` distinct entries,
        spreading channel values evenly up to ``width``."""
        splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
        maps = []
        for i in range(splits):
            r = int(i * width * 1.0 / (splits - 1))
            for j in range(splits):
                g = int(j * width * 1.0 / (splits - 1))
                for k in range(splits - 1):
                    b = int(k * width * 1.0 / (splits - 1))
                    maps.append([r, g, b])
        return np.array(maps)

    def __len__(self):
        return len(self.image_lists)

    # NOTE: a previous version of load_gt_from_txt (without the
    # ignore_difficult handling, using "###" as the ignore marker and parsing
    # character boxes) was kept here as commented-out code; removed for clarity.

    def load_gt_from_txt(self, gt_path, height=None, width=None):
        """Parse one ground-truth file.

        Returns ``(words, boxes, charsboxes, segmentations, labels)`` where
        ``boxes`` is an (N, 5) array (x1, y1, x2, y2, box_index), each
        segmentation is the axis-aligned rectangle of the box, and labels are
        1 for positives and -1 for boxes whose word is "1".

        NOTE(review): when ``ignore_difficult`` is False every line is skipped,
        so the empty-placeholder branch below is returned — confirm this is the
        intended behavior for non-ignore runs of this dataset.
        """
        words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
        lines = open(gt_path).readlines()
        for line in lines:
            charbbs = []
            strs, loc = self.line2boxes(line)
            word = strs[0]
            if self.ignore_difficult:
                rect = list(loc[0])
                # -1: annotation coordinates look 1-based — TODO confirm
                min_x = min(rect[::2]) - 1
                min_y = min(rect[1::2]) - 1
                max_x = max(rect[::2]) - 1
                max_y = max(rect[1::2]) - 1
                box = [min_x, min_y, max_x, max_y]
                # segmentations.append([loc[0, :]])
                segmentations.append([[min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]])
                tindex = len(boxes)
                boxes.append(box)
                # words.append(word)
                if word =='1':
                    labels.append(-1)
                else:
                    labels.append(1)
                # placeholder character boxes: only the owning-box index is set
                charbb = np.zeros((10,), dtype=np.float32)
                if loc.shape[0] > 1:
                    for i in range(1, loc.shape[0]):
                        charbb[9] = tindex
                        charbbs.append(charbb.copy())
                    charsboxes.append(charbbs)
            else:
                continue
        num_boxes = len(boxes)
        if len(boxes) > 0:
            keep_boxes = np.zeros((num_boxes, 5))
            keep_boxes[:, :4] = np.array(boxes)
            keep_boxes[:, 4] = range(
                num_boxes
            )
            # the 5th column is the box label,
            # same as the 10th column of all charsboxes which belong to the box
            if self.use_charann:
                return words, np.array(keep_boxes), charsboxes, segmentations, labels
            else:
                # no char annotations requested: pad with one zero char-box per word
                charbbs = np.zeros((10,), dtype=np.float32)
                if len(charsboxes) == 0:
                    for _ in range(len(words)):
                        charsboxes.append([charbbs])
                return words, np.array(keep_boxes), charsboxes, segmentations, labels
        else:
            # no usable annotations: return a single all-zero placeholder
            words.append("")
            charbbs = np.zeros((10,), dtype=np.float32)
            return (
                words,
                np.zeros((1, 5), dtype=np.float32),
                [[charbbs]],
                [[np.zeros((8,), dtype=np.float32)]],
                [1]
            )

    def line2boxes(self, line):
        """Split one CSV line: all-but-last fields are coordinates, the last is the word."""
        parts = line.strip().split(",")
        return [parts[-1]], np.array([[float(x) for x in parts[:-1]]])

    def check_charbbs(self, charbbs):
        """Vectorized validity test: each quad's bounding box must exceed
        ``min_proposal_size`` in both dimensions."""
        xmins = np.minimum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        xmaxs = np.maximum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        ymins = np.minimum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        ymaxs = np.maximum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        return np.logical_and(
            xmaxs - xmins > self.min_proposal_size,
            ymaxs - ymins > self.min_proposal_size,
        )

    def check_charbb(self, charbb):
        """Single-quad version of check_charbbs."""
        xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
        xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
        ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
        ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
        return (
            xmaxs - xmins > self.min_proposal_size
            and ymaxs - ymins > self.min_proposal_size
        )

    def char2num(self, chars):
        """Map characters to class indices in ``self.char_classes``
        (case-insensitive), e.g. ['h','e','l','l','o'] -> indices."""
        ## chars ['h', 'e', 'l', 'l', 'o']
        nums = [self.char_classes.index(c.lower()) for c in chars]
        return nums

    def get_img_info(self, item):
        """
        Return the image dimensions for the image, without
        loading and pre-processing it
        """
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item])
        width, height = img.size
        img_info = {"im_name": im_name, "height": height, "width": width}
        return img_info
| 11,925 | 39.020134 | 120 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/concat_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
"""
Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
method for querying the sizes of the image
"""
def get_idxs(self, idx):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return dataset_idx, sample_idx
def get_img_info(self, idx):
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx].get_img_info(sample_idx)
class MixDataset(object):
def __init__(self, datasets, ratios):
self.datasets = datasets
self.ratios = ratios
self.lengths = []
for dataset in self.datasets:
self.lengths.append(len(dataset))
self.lengths = np.array(self.lengths)
self.seperate_inds = []
s = 0
for i in self.ratios[:-1]:
s += i
self.seperate_inds.append(s)
def __len__(self):
return self.lengths.sum()
def __getitem__(self, item):
i = np.random.rand()
ind = bisect.bisect_right(self.seperate_inds, i)
b_ind = np.random.randint(self.lengths[ind])
return self.datasets[ind][b_ind]
| 1,498 | 26.254545 | 72 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/synthtext.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
SegmentationCharMask,
SegmentationMask,
)
from PIL import Image, ImageDraw
class SynthtextDataset(object):
    """SynthText-style dataset with word- and character-level annotations.

    Image paths come from ``list_file_path`` (one relative path per line);
    each image has a matching ``<name>.txt`` ground-truth file in ``gts_dir``
    whose lines hold 8 quad coordinates + a label, repeated (the first group
    is the word box, subsequent groups are its character boxes).
    """

    def __init__(self, use_charann, list_file_path, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        # whether to attach character-level annotations to the targets
        self.use_charann = use_charann
        with open(list_file_path, "r") as list_file:
            image_lines = list_file.readlines()
        self.image_lists = [
            os.path.join(imgs_dir, line.strip()) for line in image_lines
        ]
        self.gt_lists = [
            os.path.join(gts_dir, line.strip() + ".txt") for line in image_lines
        ]
        self.filtered_gts = []
        self.transforms = transforms
        # boxes smaller than this on either side are treated as invalid proposals
        self.min_proposal_size = 2
        # index 0 ('_') acts as the background/blank character class
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        # enable to dump debug visualizations to ./vis/ from __getitem__
        self.vis = False
        self.ignore_difficult = ignore_difficult

    def __getitem__(self, item):
        """Return ``(image, target, image_path)`` for index ``item``.

        Unreadable images are skipped by advancing to the next index until one
        opens successfully.
        """
        while True:
            img_path = self.image_lists[item]
            try:
                img = Image.open(img_path).convert("RGB")
                break
            except BaseException:
                # corrupt/missing image: fall through to the next one
                item += 1
        im_name = os.path.basename(img_path)
        width, height = img.size
        gt_path = self.gt_lists[item]
        words, boxes, charsbbs, segmentations = self.load_gt_from_txt(
            gt_path, height, width
        )
        target = BoxList(
            boxes[:, :4], img.size, mode="xyxy", use_char_ann=self.use_charann
        )
        classes = torch.ones(len(boxes))
        target.add_field("labels", classes)
        masks = SegmentationMask(segmentations, img.size)
        target.add_field("masks", masks)
        # an empty first word signals that no real annotations were found
        if words[0] == "":
            use_char_ann = False
        else:
            use_char_ann = True
        if not self.use_charann:
            use_char_ann = False
        char_masks = SegmentationCharMask(
            charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes)
        )
        target.add_field("char_masks", char_masks)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            # debug visualization: undo the mean subtraction, overlay masks
            # (and char masks when available) plus the boxes, save to ./vis/
            new_im = img.numpy().copy().transpose([1, 2, 0]) + [
                102.9801,
                115.9465,
                122.7717,
            ]
            new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
            mask = target.extra_fields["masks"].polygons[0].convert("mask")
            mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
            if self.use_charann:
                m, _ = (
                    target.extra_fields["char_masks"]
                    .chars_boxes[0]
                    .convert("char_mask")
                )
                color = self.creat_color_map(37, 255)
                color_map = color[m.numpy().astype(np.uint8)]
                char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
                char = Image.blend(char, new_im, 0.5)
            else:
                char = new_im
            new = Image.blend(char, mask, 0.5)
            img_draw = ImageDraw.Draw(new)
            for box in target.bbox.numpy():
                # expand the xyxy box to its 4 corners and close the polygon
                box = list(box)
                box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
                img_draw.line(box, fill=(255, 0, 0), width=2)
            new.save("./vis/char_" + im_name)
        return img, target, self.image_lists[item]

    def creat_color_map(self, n_class, width):
        """Build an RGB color table with at least ``n_class`` distinct entries,
        spreading channel values evenly up to ``width``."""
        splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
        maps = []
        for i in range(splits):
            r = int(i * width * 1.0 / (splits - 1))
            for j in range(splits):
                g = int(j * width * 1.0 / (splits - 1))
                for k in range(splits - 1):
                    b = int(k * width * 1.0 / (splits - 1))
                    maps.append([r, g, b])
        return np.array(maps)

    def __len__(self):
        return len(self.image_lists)

    def load_gt_from_txt(self, gt_path, height=None, width=None):
        """Parse one ground-truth file.

        Returns ``(words, boxes, charsboxes, segmentations)``: boxes as an
        (N, 5) array (x1, y1, x2, y2, box_index), one polygon segmentation and
        one list of 10-float character boxes (8 coords, char class, owning box
        index) per word. Lines labeled "###" are skipped.
        """
        words, boxes, charsboxes, segmentations = [], [], [], []
        lines = open(gt_path).readlines()
        for line in lines:
            charbbs = []
            strs, loc = self.line2boxes(line)
            word = strs[0]
            if word == "###":
                continue
            else:
                rect = list(loc[0])
                # -1: annotation coordinates look 1-based — TODO confirm
                min_x = min(rect[::2]) - 1
                min_y = min(rect[1::2]) - 1
                max_x = max(rect[::2]) - 1
                max_y = max(rect[1::2]) - 1
                box = [min_x, min_y, max_x, max_y]
                segmentations.append([loc[0, :]])
                tindex = len(boxes)
                boxes.append(box)
                words.append(word)
                c_class = self.char2num(strs[1:])
                charbb = np.zeros((10,), dtype=np.float32)
                if loc.shape[0] > 1:
                    # groups after the first are per-character quads
                    for i in range(1, loc.shape[0]):
                        charbb[:8] = loc[i, :]
                        charbb[8] = c_class[i - 1]
                        charbb[9] = tindex
                        charbbs.append(charbb.copy())
                else:
                    charbbs.append(charbb.copy())
                charsboxes.append(charbbs)
        num_boxes = len(boxes)
        if len(boxes) > 0:
            keep_boxes = np.zeros((num_boxes, 5))
            keep_boxes[:, :4] = np.array(boxes)
            keep_boxes[:, 4] = range(
                num_boxes
            )
            # the 5th column is the box label,
            # same as the 10th column of all charsboxes which belong to the box
            if self.use_charann:
                return words, np.array(keep_boxes), charsboxes, segmentations
            else:
                charbbs = np.zeros((10,), dtype=np.float32)
                for _ in range(len(words)):
                    charsboxes.append([charbbs])
                # NOTE(review): appends placeholders to charsboxes but returns
                # [[charbbs]] instead — tdtr.py's counterpart returns the list;
                # confirm whether this is intended.
                return words, np.array(keep_boxes), [[charbbs]], segmentations
        else:
            # no usable annotations: return a single all-zero placeholder
            words.append("")
            charbbs = np.zeros((10,), dtype=np.float32)
            return (
                words,
                np.zeros((1, 5), dtype=np.float32),
                [[charbbs]],
                [[np.zeros((8,), dtype=np.float32)]],
            )

    def line2boxes(self, line):
        """Split one gt line into (labels, quads): fields come in groups of 9
        (8 coordinates + 1 string), after stripping any BOM."""
        parts = line.strip().split(",")
        if "\xef\xbb\xbf" in parts[0]:
            parts[0] = parts[0][3:]
        if "\ufeff" in parts[0]:
            parts[0] = parts[0].replace("\ufeff", "")
        x1 = np.array([int(float(x)) for x in parts[::9]])
        y1 = np.array([int(float(x)) for x in parts[1::9]])
        x2 = np.array([int(float(x)) for x in parts[2::9]])
        y2 = np.array([int(float(x)) for x in parts[3::9]])
        x3 = np.array([int(float(x)) for x in parts[4::9]])
        y3 = np.array([int(float(x)) for x in parts[5::9]])
        x4 = np.array([int(float(x)) for x in parts[6::9]])
        y4 = np.array([int(float(x)) for x in parts[7::9]])
        strs = parts[8::9]
        loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
        return strs, loc

    def check_charbbs(self, charbbs):
        """Vectorized validity test: each quad's bounding box must exceed
        ``min_proposal_size`` in both dimensions."""
        xmins = np.minimum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        xmaxs = np.maximum.reduce(
            [charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
        )
        ymins = np.minimum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        ymaxs = np.maximum.reduce(
            [charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
        )
        return np.logical_and(
            xmaxs - xmins > self.min_proposal_size,
            ymaxs - ymins > self.min_proposal_size,
        )

    def check_charbb(self, charbb):
        """Single-quad version of check_charbbs."""
        xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
        xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
        ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
        ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
        return (
            xmaxs - xmins > self.min_proposal_size
            and ymaxs - ymins > self.min_proposal_size
        )

    def char2num(self, chars):
        """Map characters to class indices in ``self.char_classes``
        (case-insensitive), e.g. ['h','e','l','l','o'] -> indices."""
        ## chars ['h', 'e', 'l', 'l', 'o']
        nums = [self.char_classes.index(c.lower()) for c in chars]
        return nums

    def get_img_info(self, item):
        """
        Return the image dimensions for the image, without
        loading and pre-processing it
        """
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item])
        width, height = img.size
        img_info = {"im_name": im_name, "height": height, "width": width}
        return img_info
| 9,096 | 38.042918 | 116 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/scut.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
CharPolygons,
SegmentationCharMask,
SegmentationMask,
)
from PIL import Image, ImageDraw, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ScutDataset(object):
def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
self.use_charann = use_charann
self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)]
self.gts_dir = gts_dir
self.transforms = transforms
self.min_proposal_size = 2
self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
self.vis = False
self.ignore_difficult = ignore_difficult
if self.ignore_difficult and 'train' in self.gts_dir:
self.image_lists = self.filter_image_lists()
def filter_image_lists(self):
new_image_lists = []
for img_path in self.image_lists:
has_positive = False
im_name = os.path.basename(img_path)
gt_path = os.path.join(self.gts_dir, im_name + ".txt")
if not os.path.isfile(gt_path):
gt_path = os.path.join(
self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
)
lines = open(gt_path, 'r').readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == "###":
continue
else:
has_positive = True
if has_positive:
new_image_lists.append(img_path)
return new_image_lists
    def __getitem__(self, item):
        """Load image ``item`` and build its BoxList target.

        Returns (img, target, image_path); ``target`` carries the fields
        "labels", "masks" and "char_masks".
        """
        im_name = os.path.basename(self.image_lists[item])
        # print(self.image_lists[item])
        img = Image.open(self.image_lists[item]).convert("RGB")
        width, height = img.size
        gt_path = os.path.join(self.gts_dir, im_name + ".txt")
        words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
            gt_path, height, width
        )
        # An empty first word is the dummy annotation emitted when the
        # ground-truth file held nothing usable.
        if words[0] == "":
            use_char_ann = False
        else:
            use_char_ann = True
        if not self.use_charann:
            use_char_ann = False
        target = BoxList(boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann)
        if self.ignore_difficult:
            # Labels may contain -1 for "###" ignore regions.
            labels = torch.from_numpy(np.array(labels))
        else:
            labels = torch.ones(len(boxes))
        target.add_field("labels", labels)
        masks = SegmentationMask(segmentations, img.size)
        target.add_field("masks", masks)
        char_masks = SegmentationCharMask(
            charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes)
        )
        target.add_field("char_masks", char_masks)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            # Debug visualisation: re-add per-channel offsets (presumably the
            # dataset means subtracted by the transforms -- confirm against
            # the transform config), blend masks and dump to ./vis/.
            new_im = img.numpy().copy().transpose([1, 2, 0]) + [
                102.9801,
                115.9465,
                122.7717,
            ]
            new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
            mask = target.extra_fields["masks"].polygons[0].convert("mask")
            mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
            if self.use_charann:
                m, _ = (
                    target.extra_fields["char_masks"]
                    .chars_boxes[0]
                    .convert("char_mask")
                )
                color = self.creat_color_map(37, 255)
                color_map = color[m.numpy().astype(np.uint8)]
                char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
                char = Image.blend(char, new_im, 0.5)
            else:
                char = new_im
            new = Image.blend(char, mask, 0.5)
            img_draw = ImageDraw.Draw(new)
            for box in target.bbox.numpy():
                box = list(box)
                # Expand the xyxy box to a closed 5-point polyline.
                box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
                img_draw.line(box, fill=(255, 0, 0), width=2)
            new.save("./vis/char_" + im_name)
        return img, target, self.image_lists[item]
def creat_color_map(self, n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits - 1))
for j in range(splits):
g = int(j * width * 1.0 / (splits - 1))
for k in range(splits - 1):
b = int(k * width * 1.0 / (splits - 1))
maps.append([r, g, b])
return np.array(maps)
    def __len__(self):
        # One sample per image file discovered in imgs_dir.
        return len(self.image_lists)
# def load_gt_from_txt(self, gt_path, height=None, width=None):
# words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
# lines = open(gt_path).readlines()
# for line in lines:
# charbbs = []
# strs, loc = self.line2boxes(line)
# word = strs[0]
# if word == "###":
# labels.append(-1)
# continue
# else:
# labels.append(1)
# rect = list(loc[0])
# min_x = min(rect[::2]) - 1
# min_y = min(rect[1::2]) - 1
# max_x = max(rect[::2]) - 1
# max_y = max(rect[1::2]) - 1
# box = [min_x, min_y, max_x, max_y]
# segmentations.append([loc[0, :]])
# tindex = len(boxes)
# boxes.append(box)
# words.append(word)
# c_class = self.char2num(strs[1:])
# charbb = np.zeros((10,), dtype=np.float32)
# if loc.shape[0] > 1:
# for i in range(1, loc.shape[0]):
# charbb[:8] = loc[i, :]
# charbb[8] = c_class[i - 1]
# charbb[9] = tindex
# charbbs.append(charbb.copy())
# charsboxes.append(charbbs)
# num_boxes = len(boxes)
# if len(boxes) > 0:
# keep_boxes = np.zeros((num_boxes, 5))
# keep_boxes[:, :4] = np.array(boxes)
# keep_boxes[:, 4] = range(
# num_boxes
# ) # the 5th column is the box label,same as the 10th column of all charsboxes which belong to the box
# if self.use_charann:
# return words, np.array(keep_boxes), charsboxes, segmentations, labels
# else:
# charbbs = np.zeros((10,), dtype=np.float32)
# if len(charsboxes) == 0:
# for i in range(len(words)):
# charsboxes.append([charbbs])
# return words, np.array(keep_boxes), charsboxes, segmentations, labels
# else:
# words.append("")
# charbbs = np.zeros((10,), dtype=np.float32)
# return (
# words,
# np.zeros((1, 5), dtype=np.float32),
# [[charbbs]],
# [[np.zeros((8,), dtype=np.float32)]],
# labels
# )
def load_gt_from_txt(self, gt_path, height=None, width=None):
words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
lines = open(gt_path).readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == "###":
if self.ignore_difficult:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0, :]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
labels.append(-1)
charbbs = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
else:
continue
else:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0, :]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
labels.append(1)
c_class = self.char2num(strs[1:])
charbb = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[:8] = loc[i, :]
charbb[8] = c_class[i - 1]
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
num_boxes = len(boxes)
if len(boxes) > 0:
keep_boxes = np.zeros((num_boxes, 5))
keep_boxes[:, :4] = np.array(boxes)
keep_boxes[:, 4] = range(
num_boxes
)
# the 5th column is the box label,
# same as the 10th column of all charsboxes which belong to the box
if self.use_charann:
return words, np.array(keep_boxes), charsboxes, segmentations, labels
else:
charbbs = np.zeros((10,), dtype=np.float32)
if len(charsboxes) == 0:
for _ in range(len(words)):
charsboxes.append([charbbs])
return words, np.array(keep_boxes), charsboxes, segmentations, labels
else:
words.append("")
charbbs = np.zeros((10,), dtype=np.float32)
return (
words,
np.zeros((1, 5), dtype=np.float32),
[[charbbs]],
[[np.zeros((8,), dtype=np.float32)]],
[1]
)
def line2boxes(self, line):
parts = line.strip().split(",")
if "\xef\xbb\xbf" in parts[0]:
parts[0] = parts[0][3:]
if "\ufeff" in parts[0]:
parts[0] = parts[0].replace("\ufeff", "")
x1 = np.array([int(float(x)) for x in parts[::9]])
y1 = np.array([int(float(x)) for x in parts[1::9]])
x2 = np.array([int(float(x)) for x in parts[2::9]])
y2 = np.array([int(float(x)) for x in parts[3::9]])
x3 = np.array([int(float(x)) for x in parts[4::9]])
y3 = np.array([int(float(x)) for x in parts[5::9]])
x4 = np.array([int(float(x)) for x in parts[6::9]])
y4 = np.array([int(float(x)) for x in parts[7::9]])
strs = parts[8::9]
loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
return strs, loc
def check_charbbs(self, charbbs):
xmins = np.minimum.reduce(
[charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
)
xmaxs = np.maximum.reduce(
[charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
)
ymins = np.minimum.reduce(
[charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
)
ymaxs = np.maximum.reduce(
[charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
)
return np.logical_and(
xmaxs - xmins > self.min_proposal_size,
ymaxs - ymins > self.min_proposal_size,
)
def check_charbb(self, charbb):
xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
return (
xmaxs - xmins > self.min_proposal_size
and ymaxs - ymins > self.min_proposal_size
)
def char2num(self, chars):
## chars ['h', 'e', 'l', 'l', 'o']
nums = [self.char_classes.index(c.lower()) for c in chars]
return nums
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
im_name = os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item])
width, height = img.size
img_info = {"im_name": im_name, "height": height, "width": width}
return img_info
| 13,327 | 39.510638 | 116 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/icdar.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
SegmentationCharMask,
SegmentationMask,
)
from PIL import Image, ImageDraw
class IcdarDataset(object):
    def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        """ICDAR dataset wrapper.

        Args:
            use_charann: consume character-level annotations if True.
            imgs_dir: directory holding the images.
            gts_dir: directory of per-image ``.txt`` ground truth; may be
                None at test time (``__getitem__`` then yields no target).
            transforms: optional callable applied to (img, target).
            ignore_difficult: keep "###" regions as ignore (-1) labels and
                drop training images that contain only ignored text.
        """
        self.use_charann = use_charann
        self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)]
        self.gts_dir = gts_dir
        self.transforms = transforms
        # Minimum side length (pixels) for a character proposal to count.
        self.min_proposal_size = 2
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        # Debug switch: dump blended mask visualisations to ./vis/.
        self.vis = False
        self.ignore_difficult = ignore_difficult
        # Only filter for the training split; gts_dir may be absent at test time.
        if self.ignore_difficult and self.gts_dir is not None and 'train' in self.gts_dir:
            self.image_lists = self.filter_image_lists()
def filter_image_lists(self):
new_image_lists = []
for img_path in self.image_lists:
has_positive = False
im_name = os.path.basename(img_path)
gt_path = os.path.join(self.gts_dir, im_name + ".txt")
if not os.path.isfile(gt_path):
gt_path = os.path.join(
self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
)
lines = open(gt_path, 'r').readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == "###":
continue
else:
has_positive = True
if has_positive:
new_image_lists.append(img_path)
return new_image_lists
    def __getitem__(self, item):
        """Load image ``item`` and, when annotations exist, its target.

        Returns (img, target, image_path); ``target`` is None when
        ``gts_dir`` is None (test-time).
        """
        im_name = os.path.basename(self.image_lists[item])
        img = Image.open(self.image_lists[item]).convert("RGB")
        width, height = img.size
        if self.gts_dir is not None:
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            if not os.path.isfile(gt_path):
                # Fall back to the "gt_<stem>.txt" naming convention.
                gt_path = os.path.join(
                    self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
                )
            words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
                gt_path, height, width
            )
            target = BoxList(
                boxes[:, :4], img.size, mode="xyxy", use_char_ann=self.use_charann
            )
            if self.ignore_difficult:
                # Labels may contain -1 for "###" ignore regions.
                labels = torch.from_numpy(np.array(labels))
            else:
                labels = torch.ones(len(boxes))
            target.add_field("labels", labels)
            masks = SegmentationMask(segmentations, img.size)
            target.add_field("masks", masks)
            # An empty first word is the dummy annotation emitted when the
            # ground-truth file held nothing usable.
            if words[0] == "":
                use_char_ann = False
            else:
                use_char_ann = True
            if not self.use_charann:
                use_char_ann = False
            char_masks = SegmentationCharMask(
                charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes)
            )
            target.add_field("char_masks", char_masks)
        else:
            target = None
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            # Debug visualisation: re-add per-channel offsets (presumably the
            # dataset means subtracted by the transforms -- confirm against
            # the transform config), blend masks and dump to ./vis/.
            new_im = img.numpy().copy().transpose([1, 2, 0]) + [
                102.9801,
                115.9465,
                122.7717,
            ]
            new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
            mask = target.extra_fields["masks"].polygons[0].convert("mask")
            mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
            if self.use_charann:
                m, _ = (
                    target.extra_fields["char_masks"]
                    .chars_boxes[0]
                    .convert("char_mask")
                )
                color = self.creat_color_map(37, 255)
                color_map = color[m.numpy().astype(np.uint8)]
                char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
                char = Image.blend(char, new_im, 0.5)
            else:
                char = new_im
            new = Image.blend(char, mask, 0.5)
            img_draw = ImageDraw.Draw(new)
            for box in target.bbox.numpy():
                box = list(box)
                # Expand the xyxy box to a closed 5-point polyline.
                box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
                img_draw.line(box, fill=(255, 0, 0), width=2)
            new.save("./vis/char_" + im_name)
        return img, target, self.image_lists[item]
def creat_color_map(self, n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits - 1))
for j in range(splits):
g = int(j * width * 1.0 / (splits - 1))
for k in range(splits - 1):
b = int(k * width * 1.0 / (splits - 1))
maps.append([r, g, b])
return np.array(maps)
    def __len__(self):
        # One sample per image file discovered in imgs_dir.
        return len(self.image_lists)
def load_gt_from_txt(self, gt_path, height=None, width=None):
words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
lines = open(gt_path).readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == "###":
if self.ignore_difficult:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0, :]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
labels.append(-1)
charbbs = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
else:
continue
else:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0, :]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
labels.append(1)
c_class = self.char2num(strs[1:])
charbb = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[:8] = loc[i, :]
charbb[8] = c_class[i - 1]
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
num_boxes = len(boxes)
if len(boxes) > 0:
keep_boxes = np.zeros((num_boxes, 5))
keep_boxes[:, :4] = np.array(boxes)
keep_boxes[:, 4] = range(
num_boxes
)
# the 5th column is the box label,
# same as the 10th column of all charsboxes which belong to the box
if self.use_charann:
return words, np.array(keep_boxes), charsboxes, segmentations, labels
else:
charbbs = np.zeros((10,), dtype=np.float32)
if len(charsboxes) == 0:
for _ in range(len(words)):
charsboxes.append([charbbs])
return words, np.array(keep_boxes), charsboxes, segmentations, labels
else:
words.append("")
charbbs = np.zeros((10,), dtype=np.float32)
return (
words,
np.zeros((1, 5), dtype=np.float32),
[[charbbs]],
[[np.zeros((8,), dtype=np.float32)]],
[1]
)
def line2boxes(self, line):
parts = line.strip().split(",")
if "\xef\xbb\xbf" in parts[0]:
parts[0] = parts[0][3:]
if "\ufeff" in parts[0]:
parts[0] = parts[0].replace("\ufeff", "")
x1 = np.array([int(float(x)) for x in parts[::9]])
y1 = np.array([int(float(x)) for x in parts[1::9]])
x2 = np.array([int(float(x)) for x in parts[2::9]])
y2 = np.array([int(float(x)) for x in parts[3::9]])
x3 = np.array([int(float(x)) for x in parts[4::9]])
y3 = np.array([int(float(x)) for x in parts[5::9]])
x4 = np.array([int(float(x)) for x in parts[6::9]])
y4 = np.array([int(float(x)) for x in parts[7::9]])
strs = parts[8::9]
loc = np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).transpose()
return strs, loc
def check_charbbs(self, charbbs):
xmins = np.minimum.reduce(
[charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
)
xmaxs = np.maximum.reduce(
[charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
)
ymins = np.minimum.reduce(
[charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
)
ymaxs = np.maximum.reduce(
[charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
)
return np.logical_and(
xmaxs - xmins > self.min_proposal_size,
ymaxs - ymins > self.min_proposal_size,
)
def check_charbb(self, charbb):
xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
return (
xmaxs - xmins > self.min_proposal_size
and ymaxs - ymins > self.min_proposal_size
)
def char2num(self, chars):
## chars ['h', 'e', 'l', 'l', 'o']
nums = [self.char_classes.index(c.lower()) for c in chars]
return nums
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
im_name = os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item])
width, height = img.size
img_info = {"im_name": im_name, "height": height, "width": width}
return img_info
| 11,125 | 39.458182 | 120 | py |
MaskTextSpotterV3 | MaskTextSpotterV3-master/maskrcnn_benchmark/data/datasets/total_text.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
import os
import numpy as np
import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import (
CharPolygons,
SegmentationCharMask,
SegmentationMask,
)
from PIL import Image, ImageDraw
class TotaltextDataset(object):
    def __init__(self, use_charann, imgs_dir, gts_dir, transforms=None, ignore_difficult=False):
        """Total-Text dataset wrapper.

        Args:
            use_charann: consume character-level annotations if True.
            imgs_dir: directory holding the images.
            gts_dir: directory of per-image ``.txt`` ground truth; may be
                None at test time (``__getitem__`` then yields no target).
            transforms: optional callable applied to (img, target).
            ignore_difficult: keep "###" regions as ignore (-1) labels and
                drop training images that contain only ignored text.
        """
        self.use_charann = use_charann
        self.image_lists = [os.path.join(imgs_dir, img) for img in os.listdir(imgs_dir)]
        self.gts_dir = gts_dir
        self.transforms = transforms
        # Minimum side length (pixels) for a character proposal to count.
        self.min_proposal_size = 2
        self.char_classes = "_0123456789abcdefghijklmnopqrstuvwxyz"
        # Debug switch: dump blended mask visualisations to ./vis/.
        self.vis = False
        self.ignore_difficult = ignore_difficult
        # Only filter for the training split; gts_dir may be absent at test time.
        if self.ignore_difficult and (self.gts_dir is not None) and 'train' in self.gts_dir:
            self.image_lists = self.filter_image_lists()
def filter_image_lists(self):
new_image_lists = []
for img_path in self.image_lists:
has_positive = False
im_name = os.path.basename(img_path)
gt_path = os.path.join(self.gts_dir, im_name + ".txt")
if not os.path.isfile(gt_path):
gt_path = os.path.join(
self.gts_dir, "gt_" + im_name.split(".")[0] + ".txt"
)
lines = open(gt_path, 'r').readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == "###":
continue
else:
has_positive = True
if has_positive:
new_image_lists.append(img_path)
return new_image_lists
    def __getitem__(self, item):
        """Load image ``item`` and, when annotations exist, its target.

        Returns (img, target, image_path); ``target`` is None when
        ``gts_dir`` is None (test-time).
        """
        im_name = os.path.basename(self.image_lists[item])
        # print(self.image_lists[item])
        img = Image.open(self.image_lists[item]).convert("RGB")
        width, height = img.size
        if self.gts_dir is not None:
            gt_path = os.path.join(self.gts_dir, im_name + ".txt")
            words, boxes, charsbbs, segmentations, labels = self.load_gt_from_txt(
                gt_path, height, width
            )
            # An empty first word is the dummy annotation emitted when the
            # ground-truth file held nothing usable.
            if words[0] == "":
                use_char_ann = False
            else:
                use_char_ann = True
            if not self.use_charann:
                use_char_ann = False
            target = BoxList(
                boxes[:, :4], img.size, mode="xyxy", use_char_ann=use_char_ann
            )
            if self.ignore_difficult:
                # Labels may contain -1 for "###" ignore regions.
                labels = torch.from_numpy(np.array(labels))
            else:
                labels = torch.ones(len(boxes))
            target.add_field("labels", labels)
            masks = SegmentationMask(segmentations, img.size)
            target.add_field("masks", masks)
            char_masks = SegmentationCharMask(
                charsbbs, words=words, use_char_ann=use_char_ann, size=img.size, char_num_classes=len(self.char_classes)
            )
            target.add_field("char_masks", char_masks)
        else:
            target = None
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        if self.vis:
            # Debug visualisation: re-add per-channel offsets (presumably the
            # dataset means subtracted by the transforms -- confirm against
            # the transform config), blend masks and dump to ./vis/.
            new_im = img.numpy().copy().transpose([1, 2, 0]) + [
                102.9801,
                115.9465,
                122.7717,
            ]
            new_im = Image.fromarray(new_im.astype(np.uint8)).convert("RGB")
            mask = target.extra_fields["masks"].polygons[0].convert("mask")
            mask = Image.fromarray((mask.numpy() * 255).astype(np.uint8)).convert("RGB")
            if self.use_charann:
                m, _ = (
                    target.extra_fields["char_masks"]
                    .chars_boxes[0]
                    .convert("char_mask")
                )
                color = self.creat_color_map(37, 255)
                color_map = color[m.numpy().astype(np.uint8)]
                char = Image.fromarray(color_map.astype(np.uint8)).convert("RGB")
                char = Image.blend(char, new_im, 0.5)
            else:
                char = new_im
            new = Image.blend(char, mask, 0.5)
            img_draw = ImageDraw.Draw(new)
            for box in target.bbox.numpy():
                box = list(box)
                # Expand the xyxy box to a closed 5-point polyline.
                box = box[:2] + [box[2], box[1]] + box[2:] + [box[0], box[3]] + box[:2]
                img_draw.line(box, fill=(255, 0, 0), width=2)
            new.save("./vis/char_" + im_name)
        return img, target, self.image_lists[item]
def creat_color_map(self, n_class, width):
splits = int(np.ceil(np.power((n_class * 1.0), 1.0 / 3)))
maps = []
for i in range(splits):
r = int(i * width * 1.0 / (splits - 1))
for j in range(splits):
g = int(j * width * 1.0 / (splits - 1))
for k in range(splits - 1):
b = int(k * width * 1.0 / (splits - 1))
maps.append([r, g, b])
return np.array(maps)
    def __len__(self):
        # One sample per image file discovered in imgs_dir.
        return len(self.image_lists)
# def load_gt_from_txt(self, gt_path, height=None, width=None):
# words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
# lines = open(gt_path).readlines()
# for line in lines:
# charbbs = []
# strs, loc = self.line2boxes(line)
# word = strs[0]
# if word == "###":
# labels.append(-1)
# continue
# else:
# labels.append(1)
# rect = list(loc[0])
# min_x = min(rect[::2]) - 1
# min_y = min(rect[1::2]) - 1
# max_x = max(rect[::2]) - 1
# max_y = max(rect[1::2]) - 1
# box = [min_x, min_y, max_x, max_y]
# segmentations.append([loc[0, :]])
# tindex = len(boxes)
# boxes.append(box)
# words.append(word)
# c_class = self.char2num(strs[1:])
# charbb = np.zeros((10,), dtype=np.float32)
# if loc.shape[0] > 1:
# for i in range(1, loc.shape[0]):
# charbb[:8] = loc[i, :]
# charbb[8] = c_class[i - 1]
# charbb[9] = tindex
# charbbs.append(charbb.copy())
# charsboxes.append(charbbs)
# num_boxes = len(boxes)
# if len(boxes) > 0:
# keep_boxes = np.zeros((num_boxes, 5))
# keep_boxes[:, :4] = np.array(boxes)
# keep_boxes[:, 4] = range(
# num_boxes
# ) # the 5th column is the box label,same as the 10th column of all charsboxes which belong to the box
# if self.use_charann:
# return words, np.array(keep_boxes), charsboxes, segmentations, labels
# else:
# charbbs = np.zeros((10,), dtype=np.float32)
# for i in range(len(words)):
# charsboxes.append([charbbs])
# return words, np.array(keep_boxes), charsboxes, segmentations, labels
# else:
# words.append("")
# charbbs = np.zeros((10,), dtype=np.float32)
# return (
# words,
# np.zeros((1, 5), dtype=np.float32),
# [[charbbs]],
# [[np.zeros((8,), dtype=np.float32)]],
# labels
# )
def load_gt_from_txt(self, gt_path, height=None, width=None):
words, boxes, charsboxes, segmentations, labels = [], [], [], [], []
lines = open(gt_path).readlines()
for line in lines:
charbbs = []
strs, loc = self.line2boxes(line)
word = strs[0]
if word == "###":
if self.ignore_difficult:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
# segmentations.append([loc[0, :]])
segmentations.append([[min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
labels.append(-1)
charbbs = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
else:
continue
else:
rect = list(loc[0])
min_x = min(rect[::2]) - 1
min_y = min(rect[1::2]) - 1
max_x = max(rect[::2]) - 1
max_y = max(rect[1::2]) - 1
box = [min_x, min_y, max_x, max_y]
segmentations.append([loc[0, :]])
tindex = len(boxes)
boxes.append(box)
words.append(word)
labels.append(1)
c_class = self.char2num(strs[1:])
charbb = np.zeros((10,), dtype=np.float32)
if loc.shape[0] > 1:
for i in range(1, loc.shape[0]):
charbb[:8] = loc[i, :]
charbb[8] = c_class[i - 1]
charbb[9] = tindex
charbbs.append(charbb.copy())
charsboxes.append(charbbs)
num_boxes = len(boxes)
if len(boxes) > 0:
keep_boxes = np.zeros((num_boxes, 5))
keep_boxes[:, :4] = np.array(boxes)
keep_boxes[:, 4] = range(
num_boxes
)
# the 5th column is the box label,
# same as the 10th column of all charsboxes which belong to the box
if self.use_charann:
return words, np.array(keep_boxes), charsboxes, segmentations, labels
else:
charbbs = np.zeros((10,), dtype=np.float32)
if len(charsboxes) == 0:
for _ in range(len(words)):
charsboxes.append([charbbs])
return words, np.array(keep_boxes), charsboxes, segmentations, labels
else:
words.append("")
charbbs = np.zeros((10,), dtype=np.float32)
return (
words,
np.zeros((1, 5), dtype=np.float32),
[[charbbs]],
[[np.zeros((8,), dtype=np.float32)]],
[1]
)
def line2boxes(self, line):
parts = line.strip().split(",")
return [parts[-1]], np.array([[float(x) for x in parts[:-1]]])
def check_charbbs(self, charbbs):
xmins = np.minimum.reduce(
[charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
)
xmaxs = np.maximum.reduce(
[charbbs[:, 0], charbbs[:, 2], charbbs[:, 4], charbbs[:, 6]]
)
ymins = np.minimum.reduce(
[charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
)
ymaxs = np.maximum.reduce(
[charbbs[:, 1], charbbs[:, 3], charbbs[:, 5], charbbs[:, 7]]
)
return np.logical_and(
xmaxs - xmins > self.min_proposal_size,
ymaxs - ymins > self.min_proposal_size,
)
def check_charbb(self, charbb):
xmins = min(charbb[0], charbb[2], charbb[4], charbb[6])
xmaxs = max(charbb[0], charbb[2], charbb[4], charbb[6])
ymins = min(charbb[1], charbb[3], charbb[5], charbb[7])
ymaxs = max(charbb[1], charbb[3], charbb[5], charbb[7])
return (
xmaxs - xmins > self.min_proposal_size
and ymaxs - ymins > self.min_proposal_size
)
def char2num(self, chars):
## chars ['h', 'e', 'l', 'l', 'o']
nums = [self.char_classes.index(c.lower()) for c in chars]
return nums
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
im_name = os.path.basename(self.image_lists[item])
img = Image.open(self.image_lists[item])
width, height = img.size
img_info = {"im_name": im_name, "height": height, "width": width}
return img_info
| 12,866 | 39.589905 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.